blob: 600e0158cba78c64daf5a34946f3e0a485a34c01 [file] [log] [blame]
Jens Axboe771b53d02019-10-22 10:25:58 -06001#ifndef INTERNAL_IO_WQ_H
2#define INTERNAL_IO_WQ_H
3
/* Opaque work-queue context; users of this header only ever hold a pointer. */
struct io_wq;
5
/* Bits for io_wq_work->flags. */
enum {
	IO_WQ_WORK_CANCEL	= 1 << 0,
	IO_WQ_WORK_HAS_MM	= 1 << 1,
	IO_WQ_WORK_HASHED	= 1 << 2,
	IO_WQ_WORK_NEEDS_USER	= 1 << 3,
	IO_WQ_WORK_NEEDS_FILES	= 1 << 4,
	IO_WQ_WORK_UNBOUND	= 1 << 5,
	IO_WQ_WORK_INTERNAL	= 1 << 6,
	IO_WQ_WORK_CB		= 1 << 7,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
18
/* Outcome of a cancellation attempt (io_wq_cancel_work()/io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
24
/*
 * Minimal singly-linked intrusive list used to queue work items.  The
 * node is embedded in the item it links; the list tracks head and tail
 * so tail insertion is O(1).  Removal needs the predecessor, which
 * wq_list_for_each() tracks for the caller.
 */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

/*
 * Append @node to the tail of @list.
 *
 * ->next is cleared up front: a node carrying a stale ->next (e.g. one
 * that sat on another list earlier) must not leak old links into this
 * list when it becomes the new tail.
 */
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->first = node;
		list->last = node;
	} else {
		list->last->next = node;
		list->last = node;
	}
}

/*
 * Unlink @node from @list.  @prev is the node preceding @node, or NULL
 * when @node is the list head (exactly what wq_list_for_each() yields).
 * The removed node's ->next is cleared so it cannot dangle into the
 * list it was removed from.
 */
static inline void wq_node_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	if (node == list->first)
		list->first = node->next;
	if (node == list->last)
		list->last = prev;
	if (prev)
		prev->next = node->next;
	node->next = NULL;
}

/* Iterate @head; @pos is the current node, @prv its predecessor (NULL at head). */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	((list)->first == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
	(list)->last = NULL;			\
} while (0)
65
/* One queued unit of work, embedded in the submitter's own structure. */
struct io_wq_work {
	/* list linkage while queued, or an opaque data pointer; the union
	 * implies the two uses are mutually exclusive */
	union {
		struct io_wq_work_node list;
		void *data;
	};
	/* worker callback; takes a pointer-to-pointer to the work item */
	void (*func)(struct io_wq_work **);
	/* NOTE(review): presumably the submitter's file table, used with
	 * IO_WQ_WORK_NEEDS_FILES — confirm against io-wq.c */
	struct files_struct *files;
	unsigned flags;		/* IO_WQ_WORK_* bits */
};
75
/*
 * Initialize @work with callback @_func; list linkage, flags and files
 * are all reset.
 *
 * Fix: the original definition ended with a stray '\' after
 * "} while (0)", splicing the following source line into the macro —
 * harmless only while a blank line happened to follow.
 */
#define INIT_IO_WORK(work, _func)		\
	do {					\
		(work)->list.next = NULL;	\
		(work)->func = _func;		\
		(work)->flags = 0;		\
		(work)->files = NULL;		\
	} while (0)
83
/* Per-work reference get/put hooks, supplied by the creator via io_wq_data. */
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
86
/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	/* NOTE(review): presumably the mm/user/creds workers assume while
	 * executing work — confirm against io-wq.c */
	struct mm_struct *mm;
	struct user_struct *user;
	struct cred *creds;

	/* reference hooks invoked around work execution */
	get_work_fn *get_work;
	put_work_fn *put_work;
};
95
/* Create a work queue with @bounded bounded workers; see io_wq_data above. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Queue @work for async execution.  The _hashed variant takes a key
 * @val — presumably to serialize work sharing that key (cf. the
 * IO_WQ_HASH_SHIFT note above); confirm against io-wq.c. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
/* Wait for currently queued work to complete. */
void io_wq_flush(struct io_wq *wq);

/* Cancellation; return values per enum io_wq_cancel above. */
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

/* Match callback for io_wq_cancel_cb(); returns true for work to cancel. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data);
110
/*
 * Scheduler hooks for worker threads; no-op stubs when io_wq is not
 * configured so callers need no #ifdefs.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
122
Jens Axboe960e4322019-11-12 07:56:39 -0700123static inline bool io_wq_current_is_worker(void)
124{
125 return in_task() && (current->flags & PF_IO_WORKER);
126}
Jens Axboe771b53d02019-10-22 10:25:58 -0600127#endif