/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file. Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting. All on-duty workers are
 * either serving the manager role, on idle list or on busy hash. For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	unsigned int		current_color;	/* L: current_work's color */
	struct list_head	scheduled;	/* L: scheduled works */

	/* 64 bytes boundary on 64bit, 32 on 32bit */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* A: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	int			sleeping;	/* None */

	/*
	 * Opaque string set with work_set_desc(). Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */

	/* used by the scheduler to determine a worker's last known identity */
	work_func_t		last_func;
};

/**
 * current_wq_worker - return struct worker if %current is a workqueue worker
 */
static inline struct worker *current_wq_worker(void)
{
	if (in_task() && (current->flags & PF_WQ_WORKER))
		return kthread_data(current);
	return NULL;
}

/*
 * Scheduler hooks for concurrency managed workqueue. Only to be used from
 * sched/ and workqueue.c.
 */
void wq_worker_running(struct task_struct *task);
void wq_worker_sleeping(struct task_struct *task);
work_func_t wq_worker_last_func(struct task_struct *task);

#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */