/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file. Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting. All on-duty workers are
 * either serving the manager role, on the idle list or on the busy hash.
 * For details on the locking annotations (L, I, X...), refer to
 * workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	struct list_head	scheduled;	/* L: scheduled works */

	/* 64-byte boundary on 64-bit, 32-byte on 32-bit */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* A: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	int			sleeping;	/* None */

	/*
	 * Opaque string set with set_worker_desc(). Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */

	/* used by the scheduler to determine a worker's last known identity */
	work_func_t		last_func;
};
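
/*
 * Illustrative sketch (not part of this header): how workqueue.c uses the
 * entry/hentry union above. A worker sits on pool->idle_list through
 * ->entry while idle, and is hashed into pool->busy_hash through ->hentry
 * while executing a work item; the two states are mutually exclusive, so
 * the fields can share storage. Simplified, with locking omitted:
 *
 *	// entering idle: worker joins the pool's idle list
 *	list_add(&worker->entry, &pool->idle_list);
 *
 *	// starting a work item: worker is keyed by the work in busy_hash
 *	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
 */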

/**
 * current_wq_worker - return struct worker if %current is a workqueue worker
 */
static inline struct worker *current_wq_worker(void)
{
	if (in_task() && (current->flags & PF_WQ_WORKER))
		return kthread_data(current);
	return NULL;
}
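
/*
 * Example use (a simplified sketch of the pattern in workqueue.c, e.g. in
 * check_flush_dependency()): code running in a work item can ask whether
 * %current is a workqueue worker and, if so, inspect what it is executing:
 *
 *	struct worker *worker = current_wq_worker();
 *
 *	if (worker && worker->current_pwq->wq == wq)
 *		;	// flushing a workqueue from one of its own workers
 */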

/*
 * Scheduler hooks for concurrency managed workqueue. Only to be used from
 * sched/ and workqueue.c.
 */
void wq_worker_running(struct task_struct *task);
void wq_worker_sleeping(struct task_struct *task);
work_func_t wq_worker_last_func(struct task_struct *task);
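
/*
 * Call-site sketch (simplified from kernel/sched/core.c; exact shape may
 * differ across kernel versions): the scheduler notifies the workqueue
 * code when a worker blocks or resumes, so pool concurrency can be
 * adjusted and another worker woken if needed.
 *
 *	// on the way into schedule(), in sched_submit_work():
 *	if (task->flags & PF_WQ_WORKER)
 *		wq_worker_sleeping(task);
 *
 *	// once the task runs again, in sched_update_worker():
 *	if (task->flags & PF_WQ_WORKER)
 *		wq_worker_running(task);
 */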

#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */