#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
			unsigned int hctx_idx);
struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
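
/*
 * Illustrative sketch only, not part of this header's interface (the
 * function name below is hypothetical): blk_mq_get_ctx() calls get_cpu()
 * and therefore disables preemption, so every caller must pair it with
 * blk_mq_put_ctx() before it can sleep or return.
 */
static inline void blk_mq_get_ctx_example(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	/* ... use ctx, e.g. to look up the matching hardware queue ... */

	blk_mq_put_ctx(ctx);
}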

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);
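
/*
 * Illustrative sketch only (hypothetical helper name): how the pieces above
 * are typically combined to allocate a request. Real callers such as
 * blk_mq_alloc_request() in blk-mq.c also handle queue entry/exit and the
 * BLK_MQ_REQ_* flag semantics, which are omitted here.
 */
static inline struct request *blk_mq_alloc_request_sketch(struct request_queue *q,
		unsigned int op, unsigned int flags)
{
	struct blk_mq_alloc_data data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* disables preemption */
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct request *rq;

	blk_mq_set_alloc_data(&data, q, flags, ctx, hctx);
	rq = __blk_mq_alloc_request(&data, op);		/* NULL if no tag is available */
	blk_mq_put_ctx(ctx);

	return rq;
}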

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif