blob: cff083049d5c496f84afa8551575ceebfa2f1c39 [file] [log] [blame]
Thomas Gleixnerf33f5fe2019-05-22 09:51:24 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Paolo Bonzini4fe74b12012-02-05 12:16:00 +01002/*
3 * Virtio SCSI HBA driver
4 *
5 * Copyright IBM Corp. 2010
6 * Copyright Red Hat, Inc. 2011
7 *
8 * Authors:
9 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10 * Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010011 */
12
Wanlong Gaoba06d1e2013-03-12 15:34:40 +103013#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010015#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/mempool.h>
Christoph Hellwig0d9f0a52017-02-05 18:15:26 +010018#include <linux/interrupt.h>
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010019#include <linux/virtio.h>
20#include <linux/virtio_ids.h>
21#include <linux/virtio_config.h>
22#include <linux/virtio_scsi.h>
Paolo Bonzini9141a4c2013-04-08 23:03:25 +093023#include <linux/cpu.h>
Nicholas Bellingere6dc783a2014-02-22 18:23:33 -080024#include <linux/blkdev.h>
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010025#include <scsi/scsi_host.h>
26#include <scsi/scsi_device.h>
27#include <scsi/scsi_cmnd.h>
Venkatesh Srinivas761f1192014-07-06 16:39:27 +020028#include <scsi/scsi_tcq.h>
David Gibson25d1d502017-04-13 12:13:00 +100029#include <scsi/scsi_devinfo.h>
Ming Lei938ece72014-07-06 16:39:26 +020030#include <linux/seqlock.h>
Christoph Hellwig0d9f0a52017-02-05 18:15:26 +010031#include <linux/blk-mq-virtio.h>
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010032
33#define VIRTIO_SCSI_MEMPOOL_SZ 64
Cong Meng365a7152012-07-05 17:06:43 +080034#define VIRTIO_SCSI_EVENT_LEN 8
Paolo Bonzini9141a4c2013-04-08 23:03:25 +093035#define VIRTIO_SCSI_VQ_BASE 2
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010036
/*
 * Command queue element: per-command state shared between driver and device.
 * The req/resp unions overlay the different virtio-scsi request/response
 * layouts (SCSI command with or without protection info, task management,
 * asynchronous notification, event) — a given element only ever uses one
 * pair at a time.
 */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;		/* back-pointer to the midlayer command */
	struct completion *comp;	/* completed when a TMF response arrives */
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;
54
/*
 * One pre-posted event buffer plus the work item that processes it once
 * the device completes it (see virtscsi_complete_event/virtscsi_handle_event).
 */
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;	/* owning adapter instance */
	struct virtio_scsi_event event;	/* buffer filled in by the device */
	struct work_struct work;	/* deferred handling context */
};
60
/* A virtqueue paired with the spinlock that serializes access to it. */
struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};
67
/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;		/* number of request virtqueues (req_vqs[]) */

	/* NOTE(review): list membership managed outside this chunk — confirm use */
	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;	/* TMF/AN control requests */
	struct virtio_scsi_vq event_vq;	/* device-initiated events */
	struct virtio_scsi_vq req_vqs[];	/* num_queues request queues */
};
86
/*
 * Slab cache plus mempool backing virtio_scsi_cmd allocations on the
 * error-handling paths (abort/device reset), which must make forward
 * progress even under memory pressure.
 */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
89
90static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
91{
92 return vdev->priv;
93}
94
95static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
96{
Christoph Hellwigae3d56d2019-01-29 09:33:07 +010097 if (resid)
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010098 scsi_set_resid(sc, resid);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010099}
100
/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Translates the device's virtio-scsi response/status into midlayer
 * host bytes, copies back sense data, and completes the command.
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	/* SCSI status byte from the device; host byte is set below. */
	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	/* Map each virtio-scsi response code onto the midlayer's host byte. */
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		/* Unknown codes are logged, then treated like a failure. */
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	/* The device must never claim more sense than the buffer can hold. */
	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	sc->scsi_done(sc);
}
168
/*
 * Drain all completed buffers from @virtscsi_vq, invoking @fn on each.
 *
 * Uses the standard virtio disable/enable-callback loop: callbacks stay
 * disabled while buffers are reaped, and the loop repeats whenever
 * virtqueue_enable_cb() reports that more buffers arrived in the window
 * before callbacks were re-enabled, so no completion is missed.
 */
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		/* A broken device will never complete anything more. */
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
189
190static void virtscsi_req_done(struct virtqueue *vq)
191{
Paolo Bonzini139fe452012-06-13 16:56:32 +0200192 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
193 struct virtio_scsi *vscsi = shost_priv(sh);
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930194 int index = vq->index - VIRTIO_SCSI_VQ_BASE;
195 struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
Paolo Bonzini139fe452012-06-13 16:56:32 +0200196
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930197 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100198};
199
Paolo Bonzini8faeb522014-06-04 13:34:58 +0200200static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
201{
202 int i, num_vqs;
203
204 num_vqs = vscsi->num_queues;
205 for (i = 0; i < num_vqs; i++)
206 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
207 virtscsi_complete_cmd);
208}
209
Paolo Bonzini7f82b3c2013-04-08 23:01:38 +0930210static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100211{
212 struct virtio_scsi_cmd *cmd = buf;
213
214 if (cmd->comp)
Daniel Wagnere8f81422016-09-13 10:58:50 +0200215 complete(cmd->comp);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100216}
217
218static void virtscsi_ctrl_done(struct virtqueue *vq)
219{
Paolo Bonzini139fe452012-06-13 16:56:32 +0200220 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
221 struct virtio_scsi *vscsi = shost_priv(sh);
Paolo Bonzini139fe452012-06-13 16:56:32 +0200222
Paolo Bonzini10f34f62013-04-08 23:02:07 +0930223 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100224};
225
Paolo Bonzinicdda0e52014-06-04 13:34:56 +0200226static void virtscsi_handle_event(struct work_struct *work);
227
/*
 * (Re)post one event buffer to the event virtqueue so the device can fill
 * it in.  Returns 0 on success or the negative error from
 * virtqueue_add_inbuf().
 */
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	/* Re-arm the work item each time: the node is recycled after use. */
	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}
249
250static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
251{
252 int i;
253
254 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
255 vscsi->event_list[i].vscsi = vscsi;
256 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
257 }
258
259 return 0;
260}
261
/*
 * Tear down deferred event handling: first forbid new work from being
 * queued (under the event_vq lock, which virtscsi_complete_event also
 * takes), then wait for any in-flight handlers to finish.
 */
static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling  cancel_work_sync.  */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}
274
275static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930276 struct virtio_scsi_event *event)
Cong Meng365a7152012-07-05 17:06:43 +0800277{
278 struct scsi_device *sdev;
279 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
280 unsigned int target = event->lun[1];
281 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
282
Michael S. Tsirkind75dff32014-11-23 17:28:57 +0200283 switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
Cong Meng365a7152012-07-05 17:06:43 +0800284 case VIRTIO_SCSI_EVT_RESET_RESCAN:
285 scsi_add_device(shost, 0, target, lun);
286 break;
287 case VIRTIO_SCSI_EVT_RESET_REMOVED:
288 sdev = scsi_device_lookup(shost, 0, target, lun);
289 if (sdev) {
290 scsi_remove_device(sdev);
291 scsi_device_put(sdev);
292 } else {
293 pr_err("SCSI device %d 0 %d %d not found\n",
294 shost->host_no, target, lun);
295 }
296 break;
297 default:
298 pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
299 }
300}
301
/*
 * Handle a VIRTIO_SCSI_T_PARAM_CHANGE event.  The device encodes a unit
 * attention's ASC in the low byte of event->reason and ASCQ in the next
 * byte; on relevant sense codes the affected device is rescanned.
 */
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed".  */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}
326
Cong Meng365a7152012-07-05 17:06:43 +0800327static void virtscsi_handle_event(struct work_struct *work)
328{
329 struct virtio_scsi_event_node *event_node =
330 container_of(work, struct virtio_scsi_event_node, work);
331 struct virtio_scsi *vscsi = event_node->vscsi;
332 struct virtio_scsi_event *event = &event_node->event;
333
Michael S. Tsirkind75dff32014-11-23 17:28:57 +0200334 if (event->event &
335 cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
336 event->event &= ~cpu_to_virtio32(vscsi->vdev,
337 VIRTIO_SCSI_T_EVENTS_MISSED);
Cong Meng365a7152012-07-05 17:06:43 +0800338 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
339 }
340
Michael S. Tsirkind75dff32014-11-23 17:28:57 +0200341 switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
Cong Meng365a7152012-07-05 17:06:43 +0800342 case VIRTIO_SCSI_T_NO_EVENT:
343 break;
344 case VIRTIO_SCSI_T_TRANSPORT_RESET:
345 virtscsi_handle_transport_reset(vscsi, event);
346 break;
Paolo Bonzini865b58c2012-10-02 17:25:48 +0200347 case VIRTIO_SCSI_T_PARAM_CHANGE:
348 virtscsi_handle_param_change(vscsi, event);
349 break;
Cong Meng365a7152012-07-05 17:06:43 +0800350 default:
351 pr_err("Unsupport virtio scsi event %x\n", event->event);
352 }
353 virtscsi_kick_event(vscsi, event_node);
354}
355
Paolo Bonzini7f82b3c2013-04-08 23:01:38 +0930356static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
Cong Meng365a7152012-07-05 17:06:43 +0800357{
358 struct virtio_scsi_event_node *event_node = buf;
359
Michael S. Tsirkine67423c2014-10-15 10:22:33 +1030360 if (!vscsi->stop_events)
361 queue_work(system_freezable_wq, &event_node->work);
Cong Meng365a7152012-07-05 17:06:43 +0800362}
363
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100364static void virtscsi_event_done(struct virtqueue *vq)
365{
Paolo Bonzini139fe452012-06-13 16:56:32 +0200366 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
367 struct virtio_scsi *vscsi = shost_priv(sh);
Paolo Bonzini139fe452012-06-13 16:56:32 +0200368
Paolo Bonzini10f34f62013-04-08 23:02:07 +0930369 virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100370};
371
/*
 * Build the scatterlist chain for one command and add it to @vq.
 *
 * The device ABI fixes the ordering: all driver-writable (out) buffers
 * first — request header, then WRITE protection SGLs, then data-out —
 * followed by all device-writable (in) buffers — response header, READ
 * protection SGLs, then data-in.  Six sg entries cover the worst case.
 *
 * Returns the virtqueue_add_sgs() result (0 or negative errno).
 * Caller must hold the queue's vq_lock.
 */
static int __virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
416
/*
 * Kick a virtqueue: decide under vq_lock whether a notification is
 * needed, but issue the (potentially slow) notify outside the lock.
 */
static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
{
	bool needs_kick;
	unsigned long flags;

	spin_lock_irqsave(&vq->vq_lock, flags);
	needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
}
429
/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq		: the struct virtio_scsi_vq we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @kick	: whether to kick the virtqueue immediately
 *
 * Returns 0 on success or the negative errno from __virtscsi_add_cmd().
 * The notify, if requested and needed, happens outside the vq_lock.
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size,
			     bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}
458
/*
 * Fill in the common virtio-scsi command request header for @sc.
 * The LUN field uses the single-level addressing format: byte 0 is 1,
 * byte 1 the target id, bytes 2-3 the LUN with bit 6 of byte 2 set.
 * The command pointer itself serves as the unique tag.
 */
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}
472
#ifdef CONFIG_BLK_DEV_INTEGRITY
/*
 * Fill in the T10-PI variant of the request header: the common header
 * plus the number of protection-information bytes transferred in each
 * direction, derived from the request's block integrity profile.
 */
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = sc->request;
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	/* No request or no protection SGLs: leave the PI byte counts zero. */
	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->rq_disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif
Nicholas Bellingere6dc783a2014-02-22 18:23:33 -0800498
Ming Leic3506df2018-03-13 17:42:43 +0800499static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
500 struct scsi_cmnd *sc)
501{
502 u32 tag = blk_mq_unique_tag(sc->request);
503 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
504
505 return &vscsi->req_vqs[hwq];
506}
507
/*
 * Queue a SCSI command on its per-hwq request virtqueue.
 *
 * Returns 0 on success (including the -EIO case, which is completed
 * immediately with VIRTIO_SCSI_S_BAD_TARGET), or SCSI_MLQUEUE_HOST_BUSY
 * when the virtqueue is temporarily full so the midlayer retries.
 */
static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

	/* Choose the PI header variant when T10-PI was negotiated. */
#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	/* Only notify the device for the last command of a batch. */
	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		/* Fake a BAD_TARGET completion; vq_lock held as required. */
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}
556
/*
 * Submit a task-management request on the control queue and wait for its
 * response.  Consumes @cmd (freed back to the mempool on all paths).
 * Returns SUCCESS or FAILED for the SCSI error-handling callers.
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, sc->scsi_done will do nothing, because
	 * the block layer must have detected a timeout and as a result
	 * REQ_ATOM_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
588
/*
 * eh_device_reset_handler: issue a LOGICAL UNIT RESET TMF for @sc's LUN.
 * Allocates the request from the mempool so it cannot fail allocation
 * deadlock under memory pressure; virtscsi_tmf() frees it.
 */
static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					     VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}
611
David Gibson25d1d502017-04-13 12:13:00 +1000612static int virtscsi_device_alloc(struct scsi_device *sdevice)
613{
614 /*
615 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
616 * may have transfer limits which come from the host SCSI
617 * controller or something on the host side other than the
618 * target itself.
619 *
620 * To make this work properly, the hypervisor can adjust the
621 * target's VPD information to advertise these limits. But
622 * for that to work, the guest has to look at the VPD pages,
623 * which we won't do by default if it is an SPC-2 device, even
624 * if it does actually support it.
625 *
626 * So, set the blist to always try to read the VPD pages.
627 */
628 sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
629
630 return 0;
631}
632
633
Venkatesh Srinivas761f1192014-07-06 16:39:27 +0200634/**
635 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
636 * @sdev: Virtscsi target whose queue depth to change
637 * @qdepth: New queue depth
Venkatesh Srinivas761f1192014-07-06 16:39:27 +0200638 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +0100639static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
Venkatesh Srinivas761f1192014-07-06 16:39:27 +0200640{
641 struct Scsi_Host *shost = sdev->host;
642 int max_depth = shost->cmd_per_lun;
643
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +0100644 return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
Venkatesh Srinivas761f1192014-07-06 16:39:27 +0200645}
646
/*
 * eh_abort_handler: issue an ABORT TASK TMF for @sc, identified by the
 * same tag (the command pointer) that virtio_scsi_init_hdr() assigned.
 * The request comes from the mempool; virtscsi_tmf() frees it.
 */
static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}
669
Christoph Hellwig0d9f0a52017-02-05 18:15:26 +0100670static int virtscsi_map_queues(struct Scsi_Host *shost)
671{
672 struct virtio_scsi *vscsi = shost_priv(shost);
Dongli Zhang6343e3e2019-03-12 09:00:29 +0800673 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
Christoph Hellwig0d9f0a52017-02-05 18:15:26 +0100674
Jens Axboeed76e322018-10-29 13:06:14 -0600675 return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
Christoph Hellwig0d9f0a52017-02-05 18:15:26 +0100676}
677
Paolo Bonzini9e5470f2019-05-30 13:28:11 +0200678static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
679{
680 struct virtio_scsi *vscsi = shost_priv(shost);
681
682 virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
683}
684
Paolo Bonzinie72c9a22017-06-21 16:35:46 +0200685/*
686 * The host guarantees to respond to each command, although I/O
687 * latencies might be higher than on bare metal. Reset the timer
688 * unconditionally to give the host a chance to perform EH.
689 */
690static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
691{
692 return BLK_EH_RESET_TIMER;
693}
694
/* SCSI host template: wires the driver's entry points into the midlayer. */
static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	/* Per-command driver data lives in scsi_cmd_priv(). */
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
	.force_blk_mq = 1,
};
714
/*
 * Read one field of struct virtio_scsi_config from the device's config
 * space.  A GCC statement expression evaluating to the field value; the
 * typeof temporary gives virtio_cread() a correctly typed destination.
 */
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

/*
 * Write one field of struct virtio_scsi_config to the device's config
 * space.  do { } while(0) keeps the macro safe as a single statement.
 */
#define virtscsi_config_set(vdev, fld, val) \
	do { \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100727
Paolo Bonzini139fe452012-06-13 16:56:32 +0200728static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
729 struct virtqueue *vq)
730{
731 spin_lock_init(&virtscsi_vq->vq_lock);
732 virtscsi_vq->vq = vq;
733}
734
/* Quiesce the device and delete all of its virtqueues. */
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/*
	 * Stop all the virtqueues.  Reset comes first so the device no
	 * longer touches the rings by the time they are deleted.
	 */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
}
741
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100742static int virtscsi_init(struct virtio_device *vdev,
Wanlong Gao5c370192013-04-08 23:01:16 +0930743 struct virtio_scsi *vscsi)
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100744{
745 int err;
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930746 u32 i;
747 u32 num_vqs;
748 vq_callback_t **callbacks;
749 const char **names;
750 struct virtqueue **vqs;
Christoph Hellwig0d9f0a52017-02-05 18:15:26 +0100751 struct irq_affinity desc = { .pre_vectors = 2 };
Paolo Bonzini2bd37f02012-06-13 16:56:34 +0200752
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930753 num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
Kees Cook6da2ec52018-06-12 13:55:00 -0700754 vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
755 callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
756 GFP_KERNEL);
757 names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930758
759 if (!callbacks || !vqs || !names) {
760 err = -ENOMEM;
761 goto out;
762 }
763
764 callbacks[0] = virtscsi_ctrl_done;
765 callbacks[1] = virtscsi_event_done;
766 names[0] = "control";
767 names[1] = "event";
768 for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
769 callbacks[i] = virtscsi_req_done;
770 names[i] = "request";
771 }
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100772
773 /* Discover virtqueues and write information to configuration. */
Michael S. Tsirkin9b2bbdb2017-03-06 18:19:39 +0200774 err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100775 if (err)
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930776 goto out;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100777
Paolo Bonzini139fe452012-06-13 16:56:32 +0200778 virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
779 virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930780 for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
781 virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
782 vqs[i]);
783
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100784 virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
785 virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
Paolo Bonzini2bd37f02012-06-13 16:56:34 +0200786
Paolo Bonzini9141a4c2013-04-08 23:03:25 +0930787 err = 0;
788
789out:
790 kfree(names);
791 kfree(callbacks);
792 kfree(vqs);
793 if (err)
794 virtscsi_remove_vqs(vdev);
Paolo Bonzini2bd37f02012-06-13 16:56:34 +0200795 return err;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100796}
797
/*
 * Probe one virtio-scsi device: size and allocate the Scsi_Host, set up
 * the virtqueues, apply device-reported limits, and register with the
 * SCSI midlayer.  Ordering matters throughout: queues must exist before
 * scsi_add_host(), and the device must be marked ready before the event
 * queue is primed and scanning starts.
 */
static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	/* Config space access is mandatory for this driver. */
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	/* More request queues than CPUs buys nothing. */
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	/* struct virtio_scsi ends in a flexible req_vqs[] array. */
	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	/* Queue depth is bounded by the request virtqueue's ring size. */
	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	/* Advertise T10 PI (DIF/DIX) only if the device negotiated it. */
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	/* Prime the event queue so hotplug notifications can arrive. */
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	/* NOTE(review): only del_vqs here, no device reset first — looks
	 * asymmetric with virtscsi_remove_vqs(); confirm intent. */
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}
881
/*
 * Tear down one virtio-scsi device.  Order is significant: stop the
 * event workqueue first so no new event handling races with removal,
 * detach from the SCSI midlayer, then reset the device and delete its
 * virtqueues before dropping the final host reference.
 */
static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}
894
#ifdef CONFIG_PM_SLEEP
/* Suspend: quiesce the device and release its virtqueues. */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

/*
 * Resume: rebuild the virtqueues, mark the device live again, and
 * re-prime the event queue (its buffers were lost across freeze).
 */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	/* Device must be ready before any buffers are kicked. */
	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif
920
/* Match any virtio device advertising the SCSI device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
925
/* Optional virtio-scsi feature bits this driver is willing to negotiate. */
static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};
933
/* virtio bus glue: ties the ID table, feature list and callbacks together. */
static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};
947
948static int __init init(void)
949{
950 int ret = -ENOMEM;
951
952 virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
953 if (!virtscsi_cmd_cache) {
Wanlong Gaoba06d1e2013-03-12 15:34:40 +1030954 pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100955 goto error;
956 }
957
958
959 virtscsi_cmd_pool =
960 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
961 virtscsi_cmd_cache);
962 if (!virtscsi_cmd_pool) {
Wanlong Gaoba06d1e2013-03-12 15:34:40 +1030963 pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100964 goto error;
965 }
966 ret = register_virtio_driver(&virtio_scsi_driver);
967 if (ret < 0)
968 goto error;
969
970 return 0;
971
972error:
973 if (virtscsi_cmd_pool) {
974 mempool_destroy(virtscsi_cmd_pool);
975 virtscsi_cmd_pool = NULL;
976 }
977 if (virtscsi_cmd_cache) {
978 kmem_cache_destroy(virtscsi_cmd_cache);
979 virtscsi_cmd_cache = NULL;
980 }
981 return ret;
982}
983
/*
 * Module exit: unregister from the virtio bus first so no commands can
 * still be allocated, then release the mempool and slab cache.
 */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

/* Export the ID table so module autoloading can match virtio SCSI devices. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");