Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 1 | #ifndef INTERNAL_IO_WQ_H |
| 2 | #define INTERNAL_IO_WQ_H |
| 3 | |
| 4 | struct io_wq; |
| 5 | |
/* Per-work flag bits stored in io_wq_work.flags. */
enum {
	IO_WQ_WORK_CANCEL	= 1 << 0,
	IO_WQ_WORK_HASHED	= 1 << 1,
	IO_WQ_WORK_UNBOUND	= 1 << 2,
	IO_WQ_WORK_NO_CANCEL	= 1 << 3,
	IO_WQ_WORK_CONCURRENT	= 1 << 4,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
| 15 | |
/* Outcome of a cancellation attempt (io_wq_cancel_work() and friends). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK		= 0,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING	= 1,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND	= 2,	/* work not found */
};
| 21 | |
/* Singly-linked list node; embedded in struct io_wq_work (see below). */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};
| 25 | |
/*
 * Singly-linked list with head and tail pointers, giving O(1) append
 * at the tail (see wq_list_add_tail()).
 */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
| 30 | |
Pavel Begunkov | 86f3cd1 | 2020-03-23 22:57:22 +0300 | [diff] [blame] | 31 | static inline void wq_list_add_after(struct io_wq_work_node *node, |
| 32 | struct io_wq_work_node *pos, |
| 33 | struct io_wq_work_list *list) |
| 34 | { |
| 35 | struct io_wq_work_node *next = pos->next; |
| 36 | |
| 37 | pos->next = node; |
| 38 | node->next = next; |
| 39 | if (!next) |
| 40 | list->last = node; |
| 41 | } |
| 42 | |
Jens Axboe | 6206f0e | 2019-11-26 11:59:32 -0700 | [diff] [blame] | 43 | static inline void wq_list_add_tail(struct io_wq_work_node *node, |
| 44 | struct io_wq_work_list *list) |
| 45 | { |
| 46 | if (!list->first) { |
Jens Axboe | e995d51 | 2019-12-07 21:06:46 -0700 | [diff] [blame] | 47 | list->last = node; |
| 48 | WRITE_ONCE(list->first, node); |
Jens Axboe | 6206f0e | 2019-11-26 11:59:32 -0700 | [diff] [blame] | 49 | } else { |
| 50 | list->last->next = node; |
| 51 | list->last = node; |
| 52 | } |
| 53 | } |
| 54 | |
Pavel Begunkov | 86f3cd1 | 2020-03-23 22:57:22 +0300 | [diff] [blame] | 55 | static inline void wq_list_cut(struct io_wq_work_list *list, |
| 56 | struct io_wq_work_node *last, |
| 57 | struct io_wq_work_node *prev) |
| 58 | { |
| 59 | /* first in the list, if prev==NULL */ |
| 60 | if (!prev) |
| 61 | WRITE_ONCE(list->first, last->next); |
| 62 | else |
| 63 | prev->next = last->next; |
| 64 | |
| 65 | if (last == list->last) |
| 66 | list->last = prev; |
| 67 | last->next = NULL; |
| 68 | } |
| 69 | |
/* Unlink a single @node; @prev is its predecessor, or NULL if it is first. */
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}
| 76 | |
/*
 * Walk @head with @pos, tracking the previous node in @prv (NULL at the
 * head) so callers can hand it to wq_list_del()/wq_list_cut().
 */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness test; READ_ONCE pairs with WRITE_ONCE of ->first. */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

#define INIT_WQ_LIST(list) do {				\
	(list)->first = NULL;				\
	(list)->last = NULL;				\
} while (0)
| 85 | |
/*
 * One unit of work queued to an io_wq. Embeds its own list node; the
 * pointer members carry per-task state (files, mm, creds, fs) —
 * NOTE(review): presumably restored by the worker before running the
 * item; confirm against io-wq.c.
 */
struct io_wq_work {
	struct io_wq_work_node list;
	struct files_struct *files;
	struct mm_struct *mm;
	const struct cred *creds;
	struct fs_struct *fs;
	unsigned long fsize;
	unsigned flags;		/* mask of IO_WQ_WORK_* bits */
};
| 95 | |
Pavel Begunkov | 86f3cd1 | 2020-03-23 22:57:22 +0300 | [diff] [blame] | 96 | static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) |
| 97 | { |
| 98 | if (!work->list.next) |
| 99 | return NULL; |
| 100 | |
| 101 | return container_of(work->list.next, struct io_wq_work, list); |
| 102 | } |
| 103 | |
/* Callback to release a work item once io_wq is done with it. */
typedef void (free_work_fn)(struct io_wq_work *);
/*
 * Callback that runs a work item; returns a work pointer — presumably
 * follow-on work to execute next, or NULL (confirm in io-wq.c).
 */
typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);

/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	struct user_struct *user;

	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};
| 113 | |
/* Create a workqueue with @bounded max bounded workers; NULL-checkable. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
/* Take a reference on an existing @wq; false if it cannot be shared. */
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Queue @work for execution by a worker. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Mark @work hashed on key @val so same-key work is serialised. */
void io_wq_hash_work(struct io_wq_work *work, void *val);
| 120 | |
| 121 | static inline bool io_wq_is_hashed(struct io_wq_work *work) |
| 122 | { |
| 123 | return work->flags & IO_WQ_WORK_HASHED; |
| 124 | } |
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 125 | |
void io_wq_cancel_all(struct io_wq *wq);
/* Try to cancel a specific queued/running work item. */
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

/* Predicate: return true for work items that should be cancelled. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

/* Cancel work matched by @cancel; @cancel_all keeps going past the first hit. */
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

struct task_struct *io_wq_get_task(struct io_wq *wq);
| 135 | |
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
/* CONFIG_IO_WQ disabled: scheduler hooks become empty inlines. */
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 147 | |
Jens Axboe | 525b305 | 2019-12-17 14:13:37 -0700 | [diff] [blame] | 148 | static inline bool io_wq_current_is_worker(void) |
| 149 | { |
| 150 | return in_task() && (current->flags & PF_IO_WORKER); |
| 151 | } |
| 152 | #endif |