Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 1 | #ifndef INTERNAL_IO_WQ_H |
| 2 | #define INTERNAL_IO_WQ_H |
| 3 | |
| 4 | struct io_wq; |
| 5 | |
/*
 * Bit flags for io_wq_work.flags (tested e.g. by io_wq_is_hashed()).
 * Values are powers of two; the gaps (2, 8, 16, ...) correspond to bits
 * not present in this version of the header.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1,	/* NOTE(review): presumably marks work for cancellation — see io-wq.c */
	IO_WQ_WORK_HASHED	= 4,	/* work was hashed via io_wq_hash_work() */
	IO_WQ_WORK_UNBOUND	= 32,	/* NOTE(review): likely "not bound to a CPU count" — confirm in io-wq.c */
	IO_WQ_WORK_NO_CANCEL	= 256,	/* NOTE(review): presumably exempt from cancellation — confirm */
	IO_WQ_WORK_CONCURRENT	= 512,	/* NOTE(review): presumably may run concurrently — confirm */

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
| 15 | |
/* Result codes for the io_wq_cancel_*() family of functions. */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
| 21 | |
/* Single node of an intrusive singly-linked work list (embedded in
 * io_wq_work below). */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/* Singly-linked list with O(1) tail append; 'first' is read/written with
 * READ_ONCE/WRITE_ONCE so wq_list_empty() can be checked locklessly. */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
| 30 | |
| 31 | static inline void wq_list_add_tail(struct io_wq_work_node *node, |
| 32 | struct io_wq_work_list *list) |
| 33 | { |
| 34 | if (!list->first) { |
Jens Axboe | e995d51 | 2019-12-07 21:06:46 -0700 | [diff] [blame] | 35 | list->last = node; |
| 36 | WRITE_ONCE(list->first, node); |
Jens Axboe | 6206f0e | 2019-11-26 11:59:32 -0700 | [diff] [blame] | 37 | } else { |
| 38 | list->last->next = node; |
| 39 | list->last = node; |
| 40 | } |
| 41 | } |
| 42 | |
/*
 * Unlink @node from @list.  @prev is @node's predecessor, or NULL when
 * @node is the first entry (exactly the pair produced by
 * wq_list_for_each()).  The head update uses WRITE_ONCE to pair with the
 * READ_ONCE in wq_list_empty().  @node->next is cleared so a stale
 * pointer cannot be followed after removal.
 */
static inline void wq_node_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	if (node == list->first)
		WRITE_ONCE(list->first, node->next);
	if (node == list->last)
		list->last = prev;
	if (prev)
		prev->next = node->next;
	node->next = NULL;
}
| 55 | |
/* Walk @head; @prv tracks the predecessor of @pos (NULL for the first
 * node), which is exactly the (node, prev) pair wq_node_del() needs. */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness check; READ_ONCE pairs with the WRITE_ONCE publish
 * in wq_list_add_tail()/wq_node_del(). */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

/* Reset a list to empty. */
#define INIT_WQ_LIST(list) do {				\
	(list)->first = NULL;				\
	(list)->last = NULL;				\
} while (0)
| 64 | |
/*
 * One unit of deferred work.  The list node and the data pointer share
 * storage: a given user uses one or the other, never both at once.
 */
struct io_wq_work {
	union {
		struct io_wq_work_node list;	/* linkage while queued */
		void *data;			/* caller-private payload */
	};
	void (*func)(struct io_wq_work **);	/* handler invoked by the worker */
	struct files_struct *files;	/* NOTE(review): presumably the submitter's
					 * context to assume while running — confirm
					 * against io-wq.c for these four fields */
	struct mm_struct *mm;
	const struct cred *creds;
	struct fs_struct *fs;
	unsigned flags;			/* IO_WQ_WORK_* bits, see enum above */
	pid_t task_pid;			/* used by io_wq_cancel_pid() matching */
};
| 78 | |
/*
 * Reset *(work) to an all-zero state with only ->func set.  The compound
 * literal assignment clears every other field (files, mm, creds, fs,
 * flags, task_pid) as well.
 *
 * Fix: dropped the stray trailing '\' after "} while (0)" — it continued
 * the macro onto the following line, and only worked because that line
 * happened to be blank; any future non-blank neighbor would have been
 * silently absorbed into the macro body.
 */
#define INIT_IO_WORK(work, _func)				\
	do {							\
		*(work) = (struct io_wq_work){ .func = _func };	\
	} while (0)
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 83 | |
/* Callback invoked for a work item when io-wq is done with it —
 * NOTE(review): presumably releases/frees it; confirm in io-wq.c. */
typedef void (free_work_fn)(struct io_wq_work *);

/* Creation parameters for io_wq_create() / identity for io_wq_get(). */
struct io_wq_data {
	struct user_struct *user;	/* NOTE(review): likely for accounting — confirm */

	free_work_fn *free_work;	/* required release hook, see above */
};
| 91 | |
/* Create a work queue with @bounded bounded workers; @data supplies the
 * user accounting struct and the mandatory free_work hook. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
/* Take a reference on an existing @wq — NOTE(review): @data presumably
 * must match the one it was created with; confirm in io-wq.c. */
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Queue @work for asynchronous execution by @wq's workers. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Mark @work hashed on @val so io_wq_is_hashed() returns true —
 * NOTE(review): presumably serializes work with the same key (upper
 * IO_WQ_HASH_SHIFT bits of flags); confirm in io-wq.c. */
void io_wq_hash_work(struct io_wq_work *work, void *val);
| 98 | |
| 99 | static inline bool io_wq_is_hashed(struct io_wq_work *work) |
| 100 | { |
| 101 | return work->flags & IO_WQ_WORK_HASHED; |
| 102 | } |
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 103 | |
/* Cancel everything queued or running on @wq. */
void io_wq_cancel_all(struct io_wq *wq);
/* Cancel a specific work item; return value says whether it was found
 * and whether it had already started (see enum io_wq_cancel). */
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
/* Cancel work whose ->task_pid matches @pid. */
enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);

/* Predicate for io_wq_cancel_cb(): return true for work items that
 * should be cancelled; second argument is the caller's @data. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
| 112 | |
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 113 | #if defined(CONFIG_IO_WQ) |
| 114 | extern void io_wq_worker_sleeping(struct task_struct *); |
| 115 | extern void io_wq_worker_running(struct task_struct *); |
| 116 | #else |
| 117 | static inline void io_wq_worker_sleeping(struct task_struct *tsk) |
| 118 | { |
| 119 | } |
| 120 | static inline void io_wq_worker_running(struct task_struct *tsk) |
| 121 | { |
| 122 | } |
Jens Axboe | 525b305 | 2019-12-17 14:13:37 -0700 | [diff] [blame] | 123 | #endif |
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 124 | |
Jens Axboe | 525b305 | 2019-12-17 14:13:37 -0700 | [diff] [blame] | 125 | static inline bool io_wq_current_is_worker(void) |
| 126 | { |
| 127 | return in_task() && (current->flags & PF_IO_WORKER); |
| 128 | } |
| 129 | #endif |