// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What the host tells us, plus 2 for the header and status trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

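/*
 * Build the descriptor chain for one request: the out header is always the
 * first (driver->device) buffer and the status byte is always the last
 * (device->driver) buffer; the data scatterlist, when present, sits in
 * between, on the out side for writes and on the in side for reads.
 */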
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
			   struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

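/*
 * Translate a discard/write-zeroes request into the virtio payload: one
 * virtio_blk_discard_write_zeroes range per bio in the request.  The range
 * array rides along as a special payload and is freed again in
 * virtblk_request_done().
 */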
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	__rq_for_each_bio(bio, req) {
		u64 sector = bio->bi_iter.bi_sector;
		u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

		range[n].flags = cpu_to_le32(flags);
		range[n].num_sectors = cpu_to_le32(num_sectors);
		range[n].sector = cpu_to_le64(sector);
		n++;
	}

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}

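/*
 * Virtqueue interrupt callback.  Completions are reaped with callbacks
 * disabled, and the loop retries until virtqueue_enable_cb() reports no
 * race with a newly used buffer, so completions are not lost.
 */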
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

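/*
 * Called by blk-mq to flush a batch of queued requests: ->queue_rq() skips
 * the doorbell while bd->last is false, so kick the host here instead.
 */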
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

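/*
 * Submit one request: translate the block layer operation into a virtio-blk
 * command type, map the data scatterlist, and queue the buffers.  A full
 * ring (-ENOSPC) stops the hardware queue until a completion restarts it.
 */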
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	bool unmap = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

/* Return the ID (serial number) string for *disk into *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}

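/*
 * vblk->refs counts the probe reference plus one reference per open disk;
 * dropping the last reference frees the device structure and returns the
 * minor-number index to the IDA.
 */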
static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		virtblk_get(vblk);
	else
		ret = -ENXIO;

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static const struct block_device_operations virtblk_fops = {
	.owner = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

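/*
 * Each disk reserves 1 << PART_BITS (16) minors: minor 0 is the whole
 * device, the rest are its partitions.
 */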
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

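	/* The config space capacity is in 512-byte sectors; convert it to
	 * logical blocks for reporting.
	 */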
	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

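/*
 * Discover and set up the virtqueues.  With VIRTIO_BLK_F_MQ the device
 * reports how many request queues it supports; otherwise a single queue is
 * used.  The count is additionally capped at nr_cpu_ids.
 */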
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
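/* e.g. index 0 -> "vda", index 25 -> "vdz", index 26 -> "vdaa" (bijective base 26) */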
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

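/*
 * Returns the current cache mode: 1 for write back, 0 for write through.
 * If VIRTIO_BLK_F_CONFIG_WCE was not negotiated, the mode is fixed and is
 * inferred from VIRTIO_BLK_F_FLUSH.
 */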
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
	.map_queues	= virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions? How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
					      blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);
		blk_queue_max_discard_segments(q,
					       min_not_zero(v,
							    MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}

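/*
 * Suspend: reset the device to stop interrupts, flush pending config work,
 * quiesce blk-mq, and tear down the virtqueues; resume rebuilds the
 * virtqueues and unquiesces the queue.
 */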
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

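/*
 * Feature bits offered to the device: legacy (transitional) devices and
 * VIRTIO 1.0+ devices negotiate from separate tables, which are currently
 * identical for virtio-blk.
 */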
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");