blob: bb8f1c8f8e24aa62de9b03adc2d7f88d6045f5e5 [file] [log] [blame]
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

/*
 * Internal interface between io_uring and the io-wq async worker pool.
 * The wq itself is opaque to callers; it is defined in io-wq.c.
 */
struct io_wq;
5
/*
 * Per-work-item flag bits stored in io_wq_work.flags.  The low bits are
 * individual flags; the top 8 bits of the word carry the hash key for
 * serialised (hashed) work.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1 << 0,	/* work is being cancelled */
	IO_WQ_WORK_HAS_MM	= 1 << 1,	/* work holds an mm reference */
	IO_WQ_WORK_HASHED	= 1 << 2,	/* serialise by hash key */
	IO_WQ_WORK_NEEDS_USER	= 1 << 3,	/* needs user context */
	IO_WQ_WORK_NEEDS_FILES	= 1 << 4,	/* needs ->files assigned */
	IO_WQ_WORK_UNBOUND	= 1 << 5,	/* run from the unbound pool */
	IO_WQ_WORK_INTERNAL	= 1 << 6,	/* internal wq work item */
	IO_WQ_WORK_CB		= 1 << 7,	/* union carries callback data */

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
18
/* Result codes for the io_wq_cancel_*() family of calls. */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK = 0,		/* cancelled before started */
	IO_WQ_CANCEL_RUNNING = 1,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND = 2,	/* work not found */
};
24
/*
 * One unit of async work.  Layout is shared with io-wq.c and io_uring;
 * do not reorder members.
 */
struct io_wq_work {
	union {
		struct list_head list;	/* linkage on wq work lists */
		void *data;		/* opaque payload; presumably used when
					 * IO_WQ_WORK_CB is set — TODO confirm
					 * against io-wq.c */
	};
	void (*func)(struct io_wq_work **);	/* handler run by a worker */
	unsigned flags;				/* IO_WQ_WORK_* bits */
	struct files_struct *files;		/* files table for the work;
						 * NOTE(review): likely only
						 * valid with NEEDS_FILES —
						 * verify */
};
34
/*
 * Initialise a work item with its handler: clears all flags and the files
 * pointer.  The leading union (list/data) is deliberately left untouched.
 *
 * Fix: the original definition ended with a stray line-continuation
 * backslash after "} while (0)", which spliced the following source line
 * into the macro body.  It only worked because a blank line happened to
 * follow; any real statement placed after the macro would have been
 * silently swallowed.  The trailing backslash is removed.
 */
#define INIT_IO_WORK(work, _func)			\
	do {						\
		(work)->func = _func;			\
		(work)->flags = 0;			\
		(work)->files = NULL;			\
	} while (0)
41
/*
 * Reference hooks supplied by the wq owner via io_wq_data: taken/dropped
 * around work items the wq holds on to internally — NOTE(review): exact
 * call sites live in io-wq.c; confirm there.  Kept as function types (not
 * pointer typedefs) since users declare "get_work_fn *get_work;".
 */
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
44
/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	struct mm_struct *mm;		/* mm for workers to adopt — presumably
					 * the submitting task's; verify */
	struct user_struct *user;	/* user to account workers against —
					 * TODO confirm against io-wq.c */

	get_work_fn *get_work;		/* take a ref on a queued work item */
	put_work_fn *put_work;		/* drop that ref */
};
52
/* Create a wq with @bounded max bounded workers; NULL/ERR_PTR semantics of
 * the return are defined by io-wq.c — TODO confirm. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Queue work for async execution; the hashed variant serialises all work
 * enqueued with the same @val. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
/* Wait until previously queued work has been started — NOTE(review):
 * "started" vs "completed" semantics live in io-wq.c; verify. */
void io_wq_flush(struct io_wq *wq);

void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

/* Predicate for io_wq_cancel_cb(): return true for work that should be
 * cancelled. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
67
/*
 * Scheduler hooks, invoked when an io-wq worker task blocks or resumes.
 * Real implementations live in io-wq.c; when io-wq is not built in they
 * compile away to empty stubs so the scheduler callers need no #ifdefs.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
79
Jens Axboe960e4322019-11-12 07:56:39 -070080static inline bool io_wq_current_is_worker(void)
81{
82 return in_task() && (current->flags & PF_IO_WORKER);
83}
Jens Axboe771b53d02019-10-22 10:25:58 -060084#endif