/* Internal io_uring async work queue definitions (io-wq.h). */
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

struct io_wq;

/*
 * Bits for io_wq_work.flags. NOTE(review): per-flag semantics below are
 * inferred from the flag names — confirm against io-wq.c.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1,	/* work is being cancelled */
	IO_WQ_WORK_HAS_MM	= 2,
	IO_WQ_WORK_HASHED	= 4,	/* serialised via hash key, see io_wq_enqueue_hashed() */
	IO_WQ_WORK_NEEDS_USER	= 8,
	IO_WQ_WORK_NEEDS_FILES	= 16,
	IO_WQ_WORK_UNBOUND	= 32,
	IO_WQ_WORK_INTERNAL	= 64,
	IO_WQ_WORK_CB		= 128,
	IO_WQ_WORK_NO_CANCEL	= 256,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

/* Result of a cancellation attempt (io_wq_cancel_work()/io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

/* Singly-linked node embedded in a work item (see io_wq_work.list). */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/* Singly-linked FIFO with head and tail pointers; empty when first == NULL. */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

35static inline void wq_list_add_tail(struct io_wq_work_node *node,
36 struct io_wq_work_list *list)
37{
38 if (!list->first) {
Jens Axboee995d512019-12-07 21:06:46 -070039 list->last = node;
40 WRITE_ONCE(list->first, node);
Jens Axboe6206f0e2019-11-26 11:59:32 -070041 } else {
42 list->last->next = node;
43 list->last = node;
44 }
45}
46
47static inline void wq_node_del(struct io_wq_work_list *list,
48 struct io_wq_work_node *node,
49 struct io_wq_work_node *prev)
50{
51 if (node == list->first)
Jens Axboee995d512019-12-07 21:06:46 -070052 WRITE_ONCE(list->first, node->next);
Jens Axboe6206f0e2019-11-26 11:59:32 -070053 if (node == list->last)
54 list->last = prev;
55 if (prev)
56 prev->next = node->next;
Jens Axboe08bdcc32019-12-04 17:19:44 -070057 node->next = NULL;
Jens Axboe6206f0e2019-11-26 11:59:32 -070058}
59
/* Walk @head, tracking the previous node in @prv for use with wq_node_del(). */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness check; pairs with WRITE_ONCE() on ->first. */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
	(list)->last = NULL;			\
} while (0)

Jens Axboe771b53d02019-10-22 10:25:58 -060069struct io_wq_work {
Jens Axboeb76da702019-11-20 13:05:32 -070070 union {
Jens Axboe6206f0e2019-11-26 11:59:32 -070071 struct io_wq_work_node list;
Jens Axboeb76da702019-11-20 13:05:32 -070072 void *data;
73 };
Jens Axboe771b53d02019-10-22 10:25:58 -060074 void (*func)(struct io_wq_work **);
Jens Axboefcb323c2019-10-24 12:39:47 -060075 struct files_struct *files;
Jens Axboe6206f0e2019-11-26 11:59:32 -070076 unsigned flags;
Jens Axboe771b53d02019-10-22 10:25:58 -060077};
78
/*
 * Initialise an io_wq_work before queueing. The original had a stray
 * trailing '\' after "} while (0)", which line-spliced whatever followed
 * the macro definition into it; dropped here.
 */
#define INIT_IO_WORK(work, _func)			\
	do {						\
		(work)->list.next = NULL;		\
		(work)->func = _func;			\
		(work)->flags = 0;			\
		(work)->files = NULL;			\
	} while (0)

/* Reference get/put hooks invoked around queued work items (see io_wq_data). */
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

Jens Axboe576a3472019-11-25 08:49:20 -070090struct io_wq_data {
91 struct mm_struct *mm;
92 struct user_struct *user;
Jens Axboe0b8c0ec2019-12-02 08:50:00 -070093 const struct cred *creds;
Jens Axboe576a3472019-11-25 08:49:20 -070094
95 get_work_fn *get_work;
96 put_work_fn *put_work;
97};
98
99struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
Jens Axboe771b53d02019-10-22 10:25:58 -0600100void io_wq_destroy(struct io_wq *wq);
101
102void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
103void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
104void io_wq_flush(struct io_wq *wq);
105
106void io_wq_cancel_all(struct io_wq *wq);
107enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
108
Jens Axboe62755e32019-10-28 21:49:21 -0600109typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
110
111enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
112 void *data);
113
/* Scheduler hooks; no-ops when io-wq is not built in. */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

Jens Axboe525b3052019-12-17 14:13:37 -0700126static inline bool io_wq_current_is_worker(void)
127{
128 return in_task() && (current->flags & PF_IO_WORKER);
129}
#endif /* INTERNAL_IO_WQ_H */