blob: fb993b2bd0ef019e76438bc5483b2995ba49fc8a [file] [log] [blame]
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

/* Opaque workqueue instance; the definition lives in the implementation file. */
struct io_wq;
5
/*
 * Bits for io_wq_work->flags. Each constant is a distinct power of two
 * so the flags can be combined in a single word; the handling of each
 * individual bit lives in the io-wq implementation.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HAS_MM	= 2,
	IO_WQ_WORK_HASHED	= 4,
	IO_WQ_WORK_NEEDS_USER	= 8,
	IO_WQ_WORK_NEEDS_FILES	= 16,
	IO_WQ_WORK_UNBOUND	= 32,
	IO_WQ_WORK_INTERNAL	= 64,
	IO_WQ_WORK_CB		= 128,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
18
/* Result of a cancellation attempt (io_wq_cancel_work()/io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
24
/* Singly-linked list node, embedded in each queued work item. */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};
28
/*
 * Singly-linked FIFO with O(1) tail append. ->first is accessed via
 * WRITE_ONCE/READ_ONCE so wq_list_empty() can peek at it locklessly.
 */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
33
34static inline void wq_list_add_tail(struct io_wq_work_node *node,
35 struct io_wq_work_list *list)
36{
37 if (!list->first) {
Jens Axboee995d512019-12-07 21:06:46 -070038 list->last = node;
39 WRITE_ONCE(list->first, node);
Jens Axboe6206f0e2019-11-26 11:59:32 -070040 } else {
41 list->last->next = node;
42 list->last = node;
43 }
44}
45
46static inline void wq_node_del(struct io_wq_work_list *list,
47 struct io_wq_work_node *node,
48 struct io_wq_work_node *prev)
49{
50 if (node == list->first)
Jens Axboee995d512019-12-07 21:06:46 -070051 WRITE_ONCE(list->first, node->next);
Jens Axboe6206f0e2019-11-26 11:59:32 -070052 if (node == list->last)
53 list->last = prev;
54 if (prev)
55 prev->next = node->next;
Jens Axboe08bdcc32019-12-04 17:19:44 -070056 node->next = NULL;
Jens Axboe6206f0e2019-11-26 11:59:32 -070057}
58
/*
 * Walk the list: @pos is the current node, @prv its predecessor (NULL
 * for the first node) — the pair wq_node_del() expects.
 */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness test; pairs with the WRITE_ONCE() of ->first. */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

/* Reset a list to the empty state. */
#define INIT_WQ_LIST(list) do {				\
	(list)->first = NULL;				\
	(list)->last = NULL;				\
} while (0)
67
/* A single unit of work submitted to an io_wq. */
struct io_wq_work {
	union {
		struct io_wq_work_node list;	/* queue linkage while pending */
		void *data;	/* caller data; presumably for IO_WQ_WORK_CB — confirm in io-wq.c */
	};
	void (*func)(struct io_wq_work **);	/* work handler */
	struct files_struct *files;	/* see IO_WQ_WORK_NEEDS_FILES */
	unsigned flags;		/* IO_WQ_WORK_* bits; upper 8 bits hold the hash key */
};
77
/*
 * Initialize a work item before queueing: clear the list linkage,
 * flags and files, and install the handler.
 *
 * Note: no trailing '\' after "while (0)" — the previous stray
 * continuation silently glued the following source line into the macro.
 */
#define INIT_IO_WORK(work, _func)			\
	do {						\
		(work)->list.next = NULL;		\
		(work)->func = _func;			\
		(work)->flags = 0;			\
		(work)->files = NULL;			\
	} while (0)
85
/* Per-work callbacks; presumably reference get/put — semantics in io-wq.c. */
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

/*
 * Creation parameters for io_wq_create(). mm/user/creds describe the
 * context work runs under; exact usage lives in the implementation.
 */
struct io_wq_data {
	struct mm_struct *mm;
	struct user_struct *user;
	const struct cred *creds;

	get_work_fn *get_work;
	put_work_fn *put_work;
};
97
/* Lifetime: create a workqueue instance / tear it down. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Submission; _hashed presumably serializes work sharing @val (IO_WQ_WORK_HASHED). */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);

/* Cancellation; io_wq_cancel_work() reports via enum io_wq_cancel. */
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

/* Match predicate for io_wq_cancel_cb(); return true to cancel the item. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
#if defined(CONFIG_IO_WQ)
/* Hooks named for worker sleep/wakeup transitions; see the implementation. */
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
/* No-op stubs so callers need no CONFIG_IO_WQ conditionals of their own. */
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif /* CONFIG_IO_WQ */

#endif /* INTERNAL_IO_WQ_H */