#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
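
/*
 * A singly linked FIFO: ->first is the oldest entry, ->last the newest.
 * ->first is accessed with READ_ONCE()/WRITE_ONCE() (see wq_list_empty()
 * and the add helpers below) so emptiness can be tested locklessly;
 * all other accesses assume the caller serializes list mutation.
 */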

#define wq_list_for_each(pos, prv, head)			\
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_for_each_resume(pos, prv)			\
	for (; pos; prv = pos, pos = (pos)->next)
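
/*
 * Example (hypothetical caller, for illustration only): walking the list
 * and recovering the containing work item:
 *
 *	struct io_wq_work_node *pos, *prv;
 *
 *	wq_list_for_each(pos, prv, &list) {
 *		struct io_wq_work *work;
 *
 *		work = container_of(pos, struct io_wq_work, list);
 *		...
 *	}
 */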

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {				\
	(list)->first = NULL;					\
} while (0)

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

/**
 * wq_list_merge - merge the second list into the first one.
 * @list0: the first list
 * @list1: the second list
 *
 * Both lists are reset to empty. Return the first node of the merged list.
 */
static inline struct io_wq_work_node *wq_list_merge(struct io_wq_work_list *list0,
						    struct io_wq_work_list *list1)
{
	struct io_wq_work_node *ret;

	if (!list0->first) {
		ret = list1->first;
	} else {
		ret = list0->first;
		list0->last->next = list1->first;
	}
	INIT_WQ_LIST(list0);
	INIT_WQ_LIST(list1);
	return ret;
}

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

static inline void wq_list_add_head(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = list->first;
	if (!node->next)
		list->last = node;
	WRITE_ONCE(list->first, node);
}

static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

static inline void __wq_list_splice(struct io_wq_work_list *list,
				    struct io_wq_work_node *to)
{
	list->last->next = to->next;
	to->next = list->first;
	INIT_WQ_LIST(list);
}

static inline bool wq_list_splice(struct io_wq_work_list *list,
				  struct io_wq_work_node *to)
{
	if (!wq_list_empty(list)) {
		__wq_list_splice(list, to);
		return true;
	}
	return false;
}

static inline void wq_stack_add_head(struct io_wq_work_node *node,
				     struct io_wq_work_node *stack)
{
	node->next = stack->next;
	stack->next = node;
}
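
/*
 * wq_stack_add_head() and wq_stack_extract() (below) reuse io_wq_work_node
 * as an intrusive LIFO: @stack is a dummy head whose ->next points at the
 * top entry. wq_stack_extract() assumes the stack is non-empty; callers
 * are expected to check before popping.
 */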

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}
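
/*
 * Example (hypothetical caller, for illustration only): unlinking a
 * matching entry during a walk, using the iterator's previous pointer.
 * match() is a made-up predicate:
 *
 *	wq_list_for_each(pos, prv, &list) {
 *		if (match(pos)) {
 *			wq_list_del(&list, pos, prv);
 *			break;
 *		}
 *	}
 */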

static inline
struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
{
	struct io_wq_work_node *node = stack->next;

	stack->next = node->next;
	return node;
}

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);
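
/*
 * io_wq_work_fn is what a worker calls to execute a work item. free_work_fn
 * is called once the item is done; as io-wq uses it, the return value is
 * treated as an optional follow-up work item to run next (NULL if none).
 */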

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};
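
/*
 * Example (hypothetical, for illustration only): minimal setup. @hash,
 * my_do_work() and my_free_work() are assumed to be provided elsewhere:
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.task		= current,
 *		.do_work	= my_do_work,
 *		.free_work	= my_free_work,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded, &data);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */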

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);
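
/*
 * Example (hypothetical, for illustration only): cancelling every queued
 * or running item by matching unconditionally:
 *
 *	static bool cancel_all_cb(struct io_wq_work *work, void *data)
 *	{
 *		return true;
 *	}
 *
 *	io_wq_cancel_cb(wq, cancel_all_cb, NULL, true);
 */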

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif