// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT        0
#else
#define VIRTIO_BLK_INLINE_SG_CNT        2
#endif

static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
                 "Limit the number of request queues to use for blk device. "
                 "0 for no limit. "
                 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        /*
         * This mutex must be held by anything that may run after
         * virtblk_remove() sets vblk->vdev to NULL.
         *
         * blk-mq, virtqueue processing, and sysfs attribute code paths are
         * shut down before vblk->vdev is set to NULL and therefore do not need
         * to hold this mutex.
         */
        struct mutex vdev_mutex;
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /*
         * Tracks references from block_device_operations open/release and
         * virtio_driver probe/remove so this object can be freed once no
         * longer in use.
         */
        refcount_t refs;

        /* What host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct sg_table sg_table;
        struct scatterlist sg[];
};

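/* Translate the status byte written by the device into a blk-mq status code. */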
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

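/*
 * Add a request to the virtqueue as a descriptor chain of up to three
 * parts: the request header (driver->device), the optional data buffer
 * (direction depends on the request type) and the one-byte status
 * (device->driver).
 */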
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

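/*
 * Build the array of discard/write-zeroes ranges the device expects and
 * attach it to the request as a special payload, so that it is sent as
 * the data buffer of the command.
 */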
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        /*
         * A single max discard segment means multi-range discard isn't
         * supported, and the block layer only performs contiguity merging
         * as it does for normal RW requests. So we can't rely on the bios
         * for retrieving per-range information.
         */
        if (queue_max_discard_segments(req->q) == 1) {
                range[0].flags = cpu_to_le32(flags);
                range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
                range[0].sector = cpu_to_le64(blk_rq_pos(req));
                n = 1;
        } else {
                __rq_for_each_bio(bio, req) {
                        u64 sector = bio->bi_iter.bi_sector;
                        u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                        range[n].flags = cpu_to_le32(flags);
                        range[n].num_sectors = cpu_to_le32(num_sectors);
                        range[n].sector = cpu_to_le64(sector);
                        n++;
                }
        }

        WARN_ON_ONCE(n != segments);

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

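/* Release the (possibly chained) scatterlist table set up by virtblk_map_data(). */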
static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
        if (blk_rq_nr_phys_segments(req))
                sg_free_table_chained(&vbr->sg_table,
                                      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
                struct virtblk_req *vbr)
{
        int err;

        if (!blk_rq_nr_phys_segments(req))
                return 0;

        vbr->sg_table.sgl = vbr->sg;
        err = sg_alloc_table_chained(&vbr->sg_table,
                                     blk_rq_nr_phys_segments(req),
                                     vbr->sg_table.sgl,
                                     VIRTIO_BLK_INLINE_SG_CNT);
        if (unlikely(err))
                return -ENOMEM;

        return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
                kfree(bvec_virt(&req->special_vec));
}

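/*
 * Translate the block layer operation into a virtio-blk command header:
 * pick the VIRTIO_BLK_T_* type, fill in sector and priority, and build
 * the range payload for discard/write-zeroes requests.
 */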
static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                                      struct request *req,
                                      struct virtblk_req *vbr)
{
        bool unmap = false;
        u32 type;

        vbr->out_hdr.sector = 0;

        switch (req_op(req)) {
        case REQ_OP_READ:
                type = VIRTIO_BLK_T_IN;
                vbr->out_hdr.sector = cpu_to_virtio64(vdev,
                                                      blk_rq_pos(req));
                break;
        case REQ_OP_WRITE:
                type = VIRTIO_BLK_T_OUT;
                vbr->out_hdr.sector = cpu_to_virtio64(vdev,
                                                      blk_rq_pos(req));
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
        vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                if (virtblk_setup_discard_write_zeroes(req, unmap))
                        return BLK_STS_RESOURCE;
        }

        return 0;
}

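/* Completion handler run by blk-mq: tear down the data mapping and finish the request. */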
static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        virtblk_unmap_data(req, vbr);
        virtblk_cleanup_cmd(req);
        blk_mq_end_request(req, virtblk_result(vbr));
}

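/*
 * Virtqueue callback: reap all completed requests, then restart any
 * hardware queues that were stopped while the ring was full.
 */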
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        if (likely(!blk_should_fake_timeout(req->q)))
                                blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

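/* Called by blk-mq after a batch of requests was queued without a final kick; notify the device once now. */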
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

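/*
 * blk-mq ->queue_rq handler: set up the command header, map the data,
 * add the request to the virtqueue and notify the device when needed.
 */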
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        int num;
        int qid = hctx->queue_num;
        bool notify = false;
        blk_status_t status;
        int err;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        status = virtblk_setup_cmd(vblk->vdev, req, vbr);
        if (unlikely(status))
                return status;

        blk_mq_start_request(req);

        num = virtblk_map_data(hctx, req, vbr);
        if (unlikely(num < 0)) {
                virtblk_cleanup_cmd(req);
                return BLK_STS_RESOURCE;
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                /* Don't stop the queue if -ENOMEM: we may have failed to
                 * bounce the buffer due to global resource outage.
                 */
                if (err == -ENOSPC)
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                virtblk_unmap_data(req, vbr);
                virtblk_cleanup_cmd(req);
                switch (err) {
                case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
                case -ENOMEM:
                        return BLK_STS_RESOURCE;
                default:
                        return BLK_STS_IOERR;
                }
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}

/* Return the serial number (ID) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_mq_free_request(req);
        return err;
}

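/*
 * The virtio_blk object is shared between virtio_driver probe/remove and
 * block_device_operations open/release; it is freed when the last
 * reference is dropped.
 */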
static void virtblk_get(struct virtio_blk *vblk)
{
        refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
        if (refcount_dec_and_test(&vblk->refs)) {
                ida_simple_remove(&vd_index_ida, vblk->index);
                mutex_destroy(&vblk->vdev_mutex);
                kfree(vblk);
        }
}

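/* Opening the disk only succeeds while the underlying virtio device is still present. */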
static int virtblk_open(struct block_device *bd, fmode_t mode)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (vblk->vdev)
                virtblk_get(vblk);
        else
                ret = -ENXIO;

        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
        struct virtio_blk *vblk = disk->private_data;

        virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (!vblk->vdev) {
                ret = -ENXIO;
                goto out;
        }

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static const struct block_device_operations virtblk_fops = {
        .owner = THIS_MODULE,
        .open = virtblk_open,
        .release = virtblk_release,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

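/*
 * Discover how many request virtqueues the device offers (capped by the
 * num_request_queues module parameter and the number of CPUs), allocate
 * them and set up a lock and completion callback for each queue.
 */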
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;
        if (!err && !num_vqs) {
                dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
                return -EINVAL;
        }

        num_vqs = min_t(unsigned int,
                        min_not_zero(num_request_queues, nr_cpu_ids),
                        num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

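/* Read the current cache mode from the device: 1 means writeback, 0 means writethrough. */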
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

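/* Spread the blk-mq hardware queues over the virtqueues according to the device's interrupt affinity. */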
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq = virtio_queue_rq,
        .commit_rqs = virtio_commit_rqs,
        .complete = virtblk_request_done,
        .map_queues = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

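/*
 * Probe: read the device configuration, create the virtqueues and the
 * blk-mq tag set, apply queue limits from the config space and register
 * the gendisk.
 */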
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* Prevent integer overflows and honor max vq size */
        sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        /* This reference is dropped in virtblk_remove(). */
        refcount_set(&vblk->refs, 1);
        mutex_init(&vblk->vdev_mutex);

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        queue_depth /= 2;
        } else {
                queue_depth = virtblk_queue_depth;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_free_vq;

        vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
        if (IS_ERR(vblk->disk)) {
                err = PTR_ERR(vblk->disk);
                goto out_free_tags;
        }
        q = vblk->disk->queue;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->minors = 1 << PART_BITS;
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err) {
                err = blk_validate_block_size(blk_size);
                if (err) {
                        dev_err(&vdev->dev,
                                "virtio_blk: invalid block size: 0x%x\n",
                                blk_size);
                        goto out_cleanup_disk;
                }

                blk_queue_logical_block_size(q, blk_size);
        } else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        if (err)
                goto out_cleanup_disk;

        return 0;

out_cleanup_disk:
        blk_cleanup_disk(vblk->disk);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 965 | static void virtblk_remove(struct virtio_device *vdev) |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 966 | { |
| 967 | struct virtio_blk *vblk = vdev->priv; |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 968 | |
Michael S. Tsirkin | cc74f71 | 2014-10-15 10:22:26 +1030 | [diff] [blame] | 969 | /* Make sure no work handler is accessing the device. */ |
| 970 | flush_work(&vblk->config_work); |
Christoph Hellwig | 7a7c924 | 2011-02-01 21:43:48 +0100 | [diff] [blame] | 971 | |
Asias He | 02e2b12 | 2012-05-25 10:34:47 +0800 | [diff] [blame] | 972 | del_gendisk(vblk->disk); |
Christoph Hellwig | 89a5f06 | 2021-06-02 09:53:19 +0300 | [diff] [blame] | 973 | blk_cleanup_disk(vblk->disk); |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 974 | blk_mq_free_tag_set(&vblk->tag_set); |
| 975 | |
Stefan Hajnoczi | 90b5feb | 2020-04-30 15:04:42 +0100 | [diff] [blame] | 976 | mutex_lock(&vblk->vdev_mutex); |
| 977 | |
Rusty Russell | 6e5aa7e | 2008-02-04 23:50:03 -0500 | [diff] [blame] | 978 | /* Stop all the virtqueues. */ |
Michael S. Tsirkin | d9679d0 | 2021-10-13 06:55:44 -0400 | [diff] [blame] | 979 | virtio_reset_device(vdev); |
Rusty Russell | 6e5aa7e | 2008-02-04 23:50:03 -0500 | [diff] [blame] | 980 | |
Stefan Hajnoczi | 90b5feb | 2020-04-30 15:04:42 +0100 | [diff] [blame] | 981 | /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ |
| 982 | vblk->vdev = NULL; |
| 983 | |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 984 | vdev->config->del_vqs(vdev); |
Ming Lei | 6a27b65 | 2014-06-26 17:41:48 +0800 | [diff] [blame] | 985 | kfree(vblk->vqs); |
Alexander Graf | f4953fe | 2013-01-02 15:37:17 +1030 | [diff] [blame] | 986 | |
Stefan Hajnoczi | 90b5feb | 2020-04-30 15:04:42 +0100 | [diff] [blame] | 987 | mutex_unlock(&vblk->vdev_mutex); |
| 988 | |
| 989 | virtblk_put(vblk); |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 990 | } |
| 991 | |
Aaron Lu | 8910700 | 2013-09-17 09:25:23 +0930 | [diff] [blame] | 992 | #ifdef CONFIG_PM_SLEEP |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 993 | static int virtblk_freeze(struct virtio_device *vdev) |
| 994 | { |
| 995 | struct virtio_blk *vblk = vdev->priv; |
| 996 | |
| 997 | /* Ensure we don't receive any more interrupts */ |
Michael S. Tsirkin | d9679d0 | 2021-10-13 06:55:44 -0400 | [diff] [blame] | 998 | virtio_reset_device(vdev); |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 999 | |
Michael S. Tsirkin | cc74f71 | 2014-10-15 10:22:26 +1030 | [diff] [blame] | 1000 | /* Make sure no work handler is accessing the device. */ |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 1001 | flush_work(&vblk->config_work); |
| 1002 | |
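| | /* Quiesce the queue so no requests are dispatched while the vqs are gone. */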
Sagi Grimberg | 9b3e990 | 2017-07-04 10:03:03 +0300 | [diff] [blame] | 1003 | blk_mq_quiesce_queue(vblk->disk->queue); |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 1004 | |
| 1005 | vdev->config->del_vqs(vdev); |
Xie Yongji | b71ba22 | 2021-05-17 16:43:32 +0800 | [diff] [blame] | 1006 | kfree(vblk->vqs); |
| 1007 | |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 1008 | return 0; |
| 1009 | } |
| 1010 | |
| 1011 | static int virtblk_restore(struct virtio_device *vdev) |
| 1012 | { |
| 1013 | struct virtio_blk *vblk = vdev->priv; |
| 1014 | int ret; |
| 1015 | |
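| | /* Re-create the virtqueues that were torn down in virtblk_freeze(). */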
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 1016 | ret = init_vq(vdev->priv); |
Michael S. Tsirkin | 6d62c37 | 2014-10-15 10:22:32 +1030 | [diff] [blame] | 1017 | if (ret) |
| 1018 | return ret; |
Jens Axboe | 1cf7e9c | 2013-11-01 10:52:52 -0600 | [diff] [blame] | 1019 | |
Michael S. Tsirkin | 6d62c37 | 2014-10-15 10:22:32 +1030 | [diff] [blame] | 1020 | virtio_device_ready(vdev); |
| 1021 | |
Sagi Grimberg | 9b3e990 | 2017-07-04 10:03:03 +0300 | [diff] [blame] | 1022 | blk_mq_unquiesce_queue(vblk->disk->queue); |
Michael S. Tsirkin | 6d62c37 | 2014-10-15 10:22:32 +1030 | [diff] [blame] | 1023 | return 0; |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 1024 | } |
| 1025 | #endif |
| 1026 | |
Márton Németh | 47483e2 | 2010-01-10 13:40:02 +0100 | [diff] [blame] | 1027 | static const struct virtio_device_id id_table[] = { |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 1028 | { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, |
| 1029 | { 0 }, |
| 1030 | }; |
| 1031 | |
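| | /* Feature bits negotiated with legacy (pre-virtio 1.0) devices. */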
Michael S. Tsirkin | 19c1c5a | 2014-10-07 16:39:49 +0200 | [diff] [blame] | 1032 | static unsigned int features_legacy[] = { |
Tejun Heo | 02c42b7 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 1033 | VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, |
Christoph Hellwig | 97b50a6 | 2017-01-28 09:32:53 +0100 | [diff] [blame] | 1034 | VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, |
Michael S. Tsirkin | 592002f | 2016-02-24 17:07:27 +0200 | [diff] [blame] | 1035 | VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, |
Changpeng Liu | 1f23816 | 2018-11-01 15:40:35 -0700 | [diff] [blame] | 1036 | VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, |
Michael S. Tsirkin | 19c1c5a | 2014-10-07 16:39:49 +0200 | [diff] [blame] | 1037 | };
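| | /* Feature bits negotiated with modern (virtio 1.0 and later) devices. */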
| 1039 | static unsigned int features[] = { |
| 1040 | VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, |
| 1041 | VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, |
Michael S. Tsirkin | 592002f | 2016-02-24 17:07:27 +0200 | [diff] [blame] | 1042 | VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, |
Changpeng Liu | 1f23816 | 2018-11-01 15:40:35 -0700 | [diff] [blame] | 1043 | VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1044 | }; |
| 1045 | |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 1046 | static struct virtio_driver virtio_blk = { |
Michael S. Tsirkin | 19c1c5a | 2014-10-07 16:39:49 +0200 | [diff] [blame] | 1047 | .feature_table = features, |
| 1048 | .feature_table_size = ARRAY_SIZE(features), |
| 1049 | .feature_table_legacy = features_legacy, |
| 1050 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), |
| 1051 | .driver.name = KBUILD_MODNAME, |
| 1052 | .driver.owner = THIS_MODULE, |
| 1053 | .id_table = id_table, |
| 1054 | .probe = virtblk_probe, |
| 1055 | .remove = virtblk_remove, |
| 1056 | .config_changed = virtblk_config_changed, |
Aaron Lu | 8910700 | 2013-09-17 09:25:23 +0930 | [diff] [blame] | 1057 | #ifdef CONFIG_PM_SLEEP |
Michael S. Tsirkin | 19c1c5a | 2014-10-07 16:39:49 +0200 | [diff] [blame] | 1058 | .freeze = virtblk_freeze, |
| 1059 | .restore = virtblk_restore, |
Amit Shah | f8fb5bc | 2011-12-22 16:58:30 +0530 | [diff] [blame] | 1060 | #endif |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 1061 | }; |
| 1062 | |
| 1063 | static int __init init(void) |
| 1064 | { |
Christoph Hellwig | 7a7c924 | 2011-02-01 21:43:48 +0100 | [diff] [blame] | 1065 | int error; |
| 1066 | |
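| | /*
| |  * virtblk_wq runs each device's config_work handler, e.g. to pick up
| |  * capacity changes signalled by the device.
| |  */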
| 1067 | virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); |
| 1068 | if (!virtblk_wq) |
| 1069 | return -ENOMEM; |
| 1070 | |
Christian Borntraeger | 4f3bf19 | 2008-01-31 15:53:53 +0100 | [diff] [blame] | 1071 | major = register_blkdev(0, "virtblk"); |
Christoph Hellwig | 7a7c924 | 2011-02-01 21:43:48 +0100 | [diff] [blame] | 1072 | if (major < 0) { |
| 1073 | error = major; |
| 1074 | goto out_destroy_workqueue; |
| 1075 | } |
| 1076 | |
| 1077 | error = register_virtio_driver(&virtio_blk); |
| 1078 | if (error) |
| 1079 | goto out_unregister_blkdev; |
| 1080 | return 0; |
| 1081 | |
| 1082 | out_unregister_blkdev: |
| 1083 | unregister_blkdev(major, "virtblk"); |
| 1084 | out_destroy_workqueue: |
| 1085 | destroy_workqueue(virtblk_wq); |
| 1086 | return error; |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 1087 | } |
| 1088 | |
| 1089 | static void __exit fini(void) |
| 1090 | { |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 1091 | unregister_virtio_driver(&virtio_blk); |
Michael S. Tsirkin | 38f37b5 | 2014-10-23 18:57:19 +0300 | [diff] [blame] | 1092 | unregister_blkdev(major, "virtblk"); |
Christoph Hellwig | 7a7c924 | 2011-02-01 21:43:48 +0100 | [diff] [blame] | 1093 | destroy_workqueue(virtblk_wq); |
Rusty Russell | e467cde | 2007-10-22 11:03:38 +1000 | [diff] [blame] | 1094 | } |
| 1095 | module_init(init); |
| 1096 | module_exit(fini); |
| 1097 | |
| 1098 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 1099 | MODULE_DESCRIPTION("Virtio block driver"); |
| 1100 | MODULE_LICENSE("GPL"); |