// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        /*
         * This mutex must be held by anything that may run after
         * virtblk_remove() sets vblk->vdev to NULL.
         *
         * blk-mq, virtqueue processing, and sysfs attribute code paths are
         * shut down before vblk->vdev is set to NULL and therefore do not need
         * to hold this mutex.
         */
        struct mutex vdev_mutex;
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /*
         * Tracks references from block_device_operations open/release and
         * virtio_driver probe/remove so this object can be freed once no
         * longer in use.
         */
        refcount_t refs;
        /* What the host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};

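/*
 * Map the status byte the device wrote back into a block layer status
 * code. Anything the driver does not recognize is reported as an I/O
 * error.
 */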
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

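/*
 * Add a request to the virtqueue as up to three scatterlist entries:
 * the out_hdr (driver->device), the optional data buffer (whose
 * direction depends on the request type), and the one-byte status
 * (device->driver). Runs under the virtqueue lock, hence GFP_ATOMIC.
 */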
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

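/*
 * Build the array of discard/write zeroes ranges the device expects,
 * one per bio in the request, and attach it as the request's special
 * payload so it is sent as the command's data buffer. The array is
 * freed in virtblk_request_done().
 */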
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

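/*
 * Completion, run from the blk-mq completion path: free the
 * discard/write zeroes payload, if any, and end the request with the
 * status the device reported.
 */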
static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

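/*
 * Virtqueue interrupt callback. Reap every completed request,
 * re-checking after callbacks are re-enabled to close the race with the
 * device completing more buffers, and restart the hardware queue in
 * case it was stopped while the ring was full.
 */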
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        if (likely(!blk_should_fake_timeout(req->q)))
                                blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

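/*
 * Called by blk-mq to flush a batch of requests that were queued with
 * bd->last cleared but whose final "last" request never arrived; sends
 * the deferred notification to the device.
 */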
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}
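
/*
 * Translate one blk-mq request into a virtio-blk command, add it to the
 * ring and, for the last request of a batch, kick the device. A full
 * ring (-ENOSPC) stops the hardware queue until a completion restarts
 * it.
 */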
201
Christoph Hellwigfc17b652017-06-03 09:38:05 +0200202static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
Jens Axboe74c45052014-10-29 11:14:52 -0600203 const struct blk_mq_queue_data *bd)
Rusty Russelle467cde2007-10-22 11:03:38 +1000204{
Jens Axboe1cf7e9c2013-11-01 10:52:52 -0600205 struct virtio_blk *vblk = hctx->queue->queuedata;
Jens Axboe74c45052014-10-29 11:14:52 -0600206 struct request *req = bd->rq;
Christoph Hellwig9d74e252014-04-14 10:30:07 +0200207 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
Jens Axboe1cf7e9c2013-11-01 10:52:52 -0600208 unsigned long flags;
Paolo Bonzini20af3cf2013-03-20 15:44:27 +1030209 unsigned int num;
Ming Lei6a27b652014-06-26 17:41:48 +0800210 int qid = hctx->queue_num;
Rusty Russell5261b852014-03-13 11:23:39 +1030211 int err;
Ming Leie8edca62014-05-30 10:49:29 +0800212 bool notify = false;
Changpeng Liu1f238162018-11-01 15:40:35 -0700213 bool unmap = false;
Christoph Hellwigaebf5262017-01-31 16:57:31 +0100214 u32 type;
Rusty Russelle467cde2007-10-22 11:03:38 +1000215
Jens Axboe1cf7e9c2013-11-01 10:52:52 -0600216 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
Rusty Russelle467cde2007-10-22 11:03:38 +1000217
Christoph Hellwigaebf5262017-01-31 16:57:31 +0100218 switch (req_op(req)) {
219 case REQ_OP_READ:
220 case REQ_OP_WRITE:
221 type = 0;
222 break;
223 case REQ_OP_FLUSH:
224 type = VIRTIO_BLK_T_FLUSH;
225 break;
Changpeng Liu1f238162018-11-01 15:40:35 -0700226 case REQ_OP_DISCARD:
227 type = VIRTIO_BLK_T_DISCARD;
228 break;
229 case REQ_OP_WRITE_ZEROES:
230 type = VIRTIO_BLK_T_WRITE_ZEROES;
231 unmap = !(req->cmd_flags & REQ_NOUNMAP);
232 break;
Christoph Hellwigaebf5262017-01-31 16:57:31 +0100233 case REQ_OP_DRV_IN:
234 type = VIRTIO_BLK_T_GET_ID;
235 break;
236 default:
237 WARN_ON_ONCE(1);
Christoph Hellwigfc17b652017-06-03 09:38:05 +0200238 return BLK_STS_IOERR;
Rusty Russelle467cde2007-10-22 11:03:38 +1000239 }
240
Christoph Hellwigaebf5262017-01-31 16:57:31 +0100241 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
242 vbr->out_hdr.sector = type ?
243 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
244 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
245
Christoph Hellwige2490072014-09-13 16:40:09 -0700246 blk_mq_start_request(req);
247
Changpeng Liu1f238162018-11-01 15:40:35 -0700248 if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
249 err = virtblk_setup_discard_write_zeroes(req, unmap);
250 if (err)
251 return BLK_STS_RESOURCE;
252 }
253
Christoph Hellwig85dada02017-01-28 09:32:52 +0100254 num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
Hannes Reinecke1cde26f2009-05-18 14:41:30 +0200255 if (num) {
Christoph Hellwig85dada02017-01-28 09:32:52 +0100256 if (rq_data_dir(req) == WRITE)
Michael S. Tsirkin19c1c5a2014-10-07 16:39:49 +0200257 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
Paolo Bonzini20af3cf2013-03-20 15:44:27 +1030258 else
Michael S. Tsirkin19c1c5a2014-10-07 16:39:49 +0200259 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
Rusty Russelle467cde2007-10-22 11:03:38 +1000260 }
261
Ming Lei6a27b652014-06-26 17:41:48 +0800262 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
Christoph Hellwig782e0672019-12-12 17:37:19 +0100263 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
Rusty Russell5261b852014-03-13 11:23:39 +1030264 if (err) {
Ming Lei6a27b652014-06-26 17:41:48 +0800265 virtqueue_kick(vblk->vqs[qid].vq);
Halil Pasicf5f6b952020-02-13 13:37:27 +0100266 /* Don't stop the queue if -ENOMEM: we may have failed to
267 * bounce the buffer due to global resource outage.
268 */
269 if (err == -ENOSPC)
270 blk_mq_stop_hw_queue(hctx);
Ming Lei6a27b652014-06-26 17:41:48 +0800271 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
Halil Pasic3d973b22020-02-13 13:37:28 +0100272 switch (err) {
273 case -ENOSPC:
Ming Lei86ff7c22018-01-30 22:04:57 -0500274 return BLK_STS_DEV_RESOURCE;
Halil Pasic3d973b22020-02-13 13:37:28 +0100275 case -ENOMEM:
276 return BLK_STS_RESOURCE;
277 default:
278 return BLK_STS_IOERR;
279 }
Asias Hea98755c2012-08-08 16:07:04 +0800280 }
281
Jens Axboe74c45052014-10-29 11:14:52 -0600282 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
Ming Leie8edca62014-05-30 10:49:29 +0800283 notify = true;
Ming Lei6a27b652014-06-26 17:41:48 +0800284 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
Ming Leie8edca62014-05-30 10:49:29 +0800285
286 if (notify)
Ming Lei6a27b652014-06-26 17:41:48 +0800287 virtqueue_notify(vblk->vqs[qid].vq);
Christoph Hellwigfc17b652017-06-03 09:38:05 +0200288 return BLK_STS_OK;
Asias Hea98755c2012-08-08 16:07:04 +0800289}
290
/* Return the serial number (ID) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}

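/*
 * The vblk object is shared by the virtio_driver and the gendisk; these
 * helpers pin it so it is only freed once the device has been removed
 * and the last opener has released the disk.
 */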
static void virtblk_get(struct virtio_blk *vblk)
{
        refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
        if (refcount_dec_and_test(&vblk->refs)) {
                ida_simple_remove(&vd_index_ida, vblk->index);
                mutex_destroy(&vblk->vdev_mutex);
                kfree(vblk);
        }
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (vblk->vdev)
                virtblk_get(vblk);
        else
                ret = -ENXIO;

        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
        struct virtio_blk *vblk = disk->private_data;

        virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (!vblk->vdev) {
                ret = -ENXIO;
                goto out;
        }

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static const struct block_device_operations virtblk_fops = {
        .owner = THIS_MODULE,
        .open = virtblk_open,
        .release = virtblk_release,
        .getgeo = virtblk_getgeo,
};

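/* Each disk claims 1 << PART_BITS minor numbers, one per partition. */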
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

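/*
 * Negotiate the number of request virtqueues (VIRTIO_BLK_F_MQ, capped
 * at nr_cpu_ids) and discover them from the device. The callback, name
 * and vq arrays are only needed for the virtio_find_vqs() call itself.
 */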
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

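/*
 * Whether to treat the device as having a writeback cache: taken from
 * the wce config field when VIRTIO_BLK_F_CONFIG_WCE is negotiated,
 * otherwise inferred from VIRTIO_BLK_F_FLUSH.
 */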
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

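/* Initialize the scatterlist embedded in each request's driver data. */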
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
        .map_queues     = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

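/*
 * Device probe: allocate the virtio_blk object and its virtqueues, size
 * the blk-mq tag set from the ring, create the gendisk, and apply the
 * queue limits (segments, block size, topology, discard/write zeroes)
 * that the host advertises via feature bits and config space.
 */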
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

716 /* We need an extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        /* This reference is dropped in virtblk_remove(). */
        refcount_set(&vblk->refs, 1);
        mutex_init(&vblk->vdev_mutex);

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;

out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

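/*
 * Device removal: tear down the disk and queues, reset the device, and
 * clear vblk->vdev under vdev_mutex so that open()/getgeo() fail
 * gracefully afterwards; the object itself is freed by the final
 * virtblk_put().
 */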
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
        vblk->vdev = NULL;

        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        mutex_unlock(&vblk->vdev_mutex);

        virtblk_put(vblk);
}

#ifdef CONFIG_PM_SLEEP
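/*
 * Suspend: reset the device to stop interrupts, quiesce blk-mq and tear
 * down the virtqueues; virtblk_restore() recreates them on resume.
 */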
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");