#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HAS_MM	= 2,
	IO_WQ_WORK_HASHED	= 4,
	IO_WQ_WORK_NEEDS_USER	= 8,
	IO_WQ_WORK_NEEDS_FILES	= 16,
	IO_WQ_WORK_UNBOUND	= 32,
	IO_WQ_WORK_INTERNAL	= 64,
	IO_WQ_WORK_CB		= 128,
	IO_WQ_WORK_NO_CANCEL	= 256,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
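/*
 * Illustrative sketch, not part of the original header: the low bits of
 * io_wq_work->flags hold the IO_WQ_WORK_* state bits above, while the bits
 * from IO_WQ_HASH_SHIFT upwards carry the hash key for hashed work.
 * Assuming a 32-bit flags field, a key might be packed and read back
 * roughly like this ('hash_key' is hypothetical):
 *
 *	work->flags |= IO_WQ_WORK_HASHED | (hash_key << IO_WQ_HASH_SHIFT);
 *	hash_key = work->flags >> IO_WQ_HASH_SHIFT;
 */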

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before it started */
	IO_WQ_CANCEL_RUNNING,	/* found running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

/* Append @node to the tail of @list */
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

/* Unlink @node from @list; @prev is the node before it, or NULL if @node is first */
static inline void wq_node_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	if (node == list->first)
		WRITE_ONCE(list->first, node->next);
	if (node == list->last)
		list->last = prev;
	if (prev)
		prev->next = node->next;
	node->next = NULL;
}

#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
	(list)->last = NULL;			\
} while (0)
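
/*
 * Illustrative sketch, not part of the original header: typical use of the
 * singly linked work list above. Iteration tracks the previous node so an
 * entry can be unlinked in place; 'some_work' is a hypothetical
 * struct io_wq_work.
 *
 *	struct io_wq_work_list list;
 *	struct io_wq_work_node *node, *prev;
 *
 *	INIT_WQ_LIST(&list);
 *	wq_list_add_tail(&some_work->list, &list);
 *
 *	wq_list_for_each(node, prev, &list) {
 *		if (node == &some_work->list) {
 *			wq_node_del(&list, node, prev);
 *			break;
 *		}
 *	}
 */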

struct io_wq_work {
	union {
		struct io_wq_work_node list;
		void *data;
	};
	void (*func)(struct io_wq_work **);
	struct files_struct *files;
	unsigned flags;
};

#define INIT_IO_WORK(work, _func)		\
	do {					\
		(work)->list.next = NULL;	\
		(work)->func = _func;		\
		(work)->flags = 0;		\
		(work)->files = NULL;		\
	} while (0)

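/*
 * Illustrative sketch, not part of the original header: a work item is
 * initialised with INIT_IO_WORK() and a handler matching the ->func
 * signature above; the double pointer lets a handler swap in a follow-on
 * work item through *workptr. 'req' and my_work_handler are hypothetical.
 *
 *	static void my_work_handler(struct io_wq_work **workptr)
 *	{
 *		struct io_wq_work *work = *workptr;
 *
 *		... process 'work' ...
 *	}
 *
 *	INIT_IO_WORK(&req->work, my_work_handler);
 */
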
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

struct io_wq_data {
	struct mm_struct *mm;
	struct user_struct *user;
	const struct cred *creds;

	get_work_fn *get_work;
	put_work_fn *put_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);
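
/*
 * Illustrative sketch, not part of the original header: creating a
 * workqueue and queueing work. The field values, my_get_work/my_put_work
 * and 'req' are hypothetical; io_wq_enqueue_hashed() is meant to serialise
 * items queued with the same 'val' key.
 *
 *	struct io_wq_data data = {
 *		.mm		= current->mm,
 *		.user		= current_user(),
 *		.creds		= current_cred(),
 *		.get_work	= my_get_work,
 *		.put_work	= my_put_work,
 *	};
 *	struct io_wq *wq = io_wq_create(32, &data);
 *
 *	if (!IS_ERR(wq)) {
 *		io_wq_enqueue(wq, &req->work);
 *		...
 *		io_wq_destroy(wq);
 *	}
 */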

void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
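
/*
 * Illustrative sketch, not part of the original header: cancelling work
 * through a match callback and acting on the io_wq_cancel result. The
 * callback and 'target' are hypothetical.
 *
 *	static bool my_cancel_match(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	enum io_wq_cancel ret;
 *
 *	ret = io_wq_cancel_cb(wq, my_cancel_match, target);
 *	if (ret == IO_WQ_CANCEL_NOTFOUND)
 *		... no matching work was queued or running ...
 */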

/* Scheduler hooks, called when an io-wq worker goes to sleep or starts running */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

/* True when called from an io-wq worker thread, in task context */
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER);
}
#endif