blob: facfc90ef0058e0d0e3241fe7de55c4ee8111d69 [file] [log] [blame]
Paolo Bonzini4fe74b12012-02-05 12:16:00 +01001/*
2 * Virtio SCSI HBA driver
3 *
4 * Copyright IBM Corp. 2010
5 * Copyright Red Hat, Inc. 2011
6 *
7 * Authors:
8 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
9 * Paolo Bonzini <pbonzini@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/mempool.h>
19#include <linux/virtio.h>
20#include <linux/virtio_ids.h>
21#include <linux/virtio_config.h>
22#include <linux/virtio_scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_device.h>
25#include <scsi/scsi_cmnd.h>
26
27#define VIRTIO_SCSI_MEMPOOL_SZ 64
28
29/* Command queue element */
30struct virtio_scsi_cmd {
31 struct scsi_cmnd *sc;
32 struct completion *comp;
33 union {
34 struct virtio_scsi_cmd_req cmd;
35 struct virtio_scsi_ctrl_tmf_req tmf;
36 struct virtio_scsi_ctrl_an_req an;
37 } req;
38 union {
39 struct virtio_scsi_cmd_resp cmd;
40 struct virtio_scsi_ctrl_tmf_resp tmf;
41 struct virtio_scsi_ctrl_an_resp an;
42 struct virtio_scsi_event evt;
43 } resp;
44} ____cacheline_aligned_in_smp;
45
Paolo Bonzini139fe452012-06-13 16:56:32 +020046struct virtio_scsi_vq {
47 /* Protects vq */
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010048 spinlock_t vq_lock;
49
Paolo Bonzini139fe452012-06-13 16:56:32 +020050 struct virtqueue *vq;
51};
52
53/* Driver instance state */
54struct virtio_scsi {
55 /* Protects sg[]. The lock hierarchy is sg_lock -> vq_lock. */
56 spinlock_t sg_lock;
57
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010058 struct virtio_device *vdev;
Paolo Bonzini139fe452012-06-13 16:56:32 +020059 struct virtio_scsi_vq ctrl_vq;
60 struct virtio_scsi_vq event_vq;
61 struct virtio_scsi_vq req_vq;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +010062
63 /* For sglist construction when adding commands to the virtqueue. */
64 struct scatterlist sg[];
65};
66
67static struct kmem_cache *virtscsi_cmd_cache;
68static mempool_t *virtscsi_cmd_pool;
69
70static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
71{
72 return vdev->priv;
73}
74
75static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
76{
77 if (!resid)
78 return;
79
80 if (!scsi_bidi_cmnd(sc)) {
81 scsi_set_resid(sc, resid);
82 return;
83 }
84
85 scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
86 scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
87}
88
89/**
90 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
91 *
92 * Called with vq_lock held.
93 */
94static void virtscsi_complete_cmd(void *buf)
95{
96 struct virtio_scsi_cmd *cmd = buf;
97 struct scsi_cmnd *sc = cmd->sc;
98 struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
99
100 dev_dbg(&sc->device->sdev_gendev,
101 "cmd %p response %u status %#02x sense_len %u\n",
102 sc, resp->response, resp->status, resp->sense_len);
103
104 sc->result = resp->status;
105 virtscsi_compute_resid(sc, resp->resid);
106 switch (resp->response) {
107 case VIRTIO_SCSI_S_OK:
108 set_host_byte(sc, DID_OK);
109 break;
110 case VIRTIO_SCSI_S_OVERRUN:
111 set_host_byte(sc, DID_ERROR);
112 break;
113 case VIRTIO_SCSI_S_ABORTED:
114 set_host_byte(sc, DID_ABORT);
115 break;
116 case VIRTIO_SCSI_S_BAD_TARGET:
117 set_host_byte(sc, DID_BAD_TARGET);
118 break;
119 case VIRTIO_SCSI_S_RESET:
120 set_host_byte(sc, DID_RESET);
121 break;
122 case VIRTIO_SCSI_S_BUSY:
123 set_host_byte(sc, DID_BUS_BUSY);
124 break;
125 case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
126 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
127 break;
128 case VIRTIO_SCSI_S_TARGET_FAILURE:
129 set_host_byte(sc, DID_TARGET_FAILURE);
130 break;
131 case VIRTIO_SCSI_S_NEXUS_FAILURE:
132 set_host_byte(sc, DID_NEXUS_FAILURE);
133 break;
134 default:
135 scmd_printk(KERN_WARNING, sc, "Unknown response %d",
136 resp->response);
137 /* fall through */
138 case VIRTIO_SCSI_S_FAILURE:
139 set_host_byte(sc, DID_ERROR);
140 break;
141 }
142
143 WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
144 if (sc->sense_buffer) {
145 memcpy(sc->sense_buffer, resp->sense,
146 min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
147 if (resp->sense_len)
148 set_driver_byte(sc, DRIVER_SENSE);
149 }
150
151 mempool_free(cmd, virtscsi_cmd_pool);
152 sc->scsi_done(sc);
153}
154
155static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
156{
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100157 void *buf;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100158 unsigned int len;
159
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100160 do {
161 virtqueue_disable_cb(vq);
162 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
163 fn(buf);
164 } while (!virtqueue_enable_cb(vq));
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100165}
166
167static void virtscsi_req_done(struct virtqueue *vq)
168{
Paolo Bonzini139fe452012-06-13 16:56:32 +0200169 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
170 struct virtio_scsi *vscsi = shost_priv(sh);
171 unsigned long flags;
172
173 spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100174 virtscsi_vq_done(vq, virtscsi_complete_cmd);
Paolo Bonzini139fe452012-06-13 16:56:32 +0200175 spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100176};
177
178static void virtscsi_complete_free(void *buf)
179{
180 struct virtio_scsi_cmd *cmd = buf;
181
182 if (cmd->comp)
183 complete_all(cmd->comp);
Paolo Bonzinie4594bb2012-05-04 12:32:04 +0200184 else
185 mempool_free(cmd, virtscsi_cmd_pool);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100186}
187
188static void virtscsi_ctrl_done(struct virtqueue *vq)
189{
Paolo Bonzini139fe452012-06-13 16:56:32 +0200190 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
191 struct virtio_scsi *vscsi = shost_priv(sh);
192 unsigned long flags;
193
194 spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100195 virtscsi_vq_done(vq, virtscsi_complete_free);
Paolo Bonzini139fe452012-06-13 16:56:32 +0200196 spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100197};
198
199static void virtscsi_event_done(struct virtqueue *vq)
200{
Paolo Bonzini139fe452012-06-13 16:56:32 +0200201 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
202 struct virtio_scsi *vscsi = shost_priv(sh);
203 unsigned long flags;
204
205 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100206 virtscsi_vq_done(vq, virtscsi_complete_free);
Paolo Bonzini139fe452012-06-13 16:56:32 +0200207 spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100208};
209
210static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
211 struct scsi_data_buffer *sdb)
212{
213 struct sg_table *table = &sdb->table;
214 struct scatterlist *sg_elem;
215 unsigned int idx = *p_idx;
216 int i;
217
218 for_each_sg(table->sgl, sg_elem, table->nents, i)
219 sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);
220
221 *p_idx = idx;
222}
223
224/**
225 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
226 * @vscsi : virtio_scsi state
227 * @cmd : command structure
228 * @out_num : number of read-only elements
229 * @in_num : number of write-only elements
230 * @req_size : size of the request buffer
231 * @resp_size : size of the response buffer
232 *
233 * Called with vq_lock held.
234 */
235static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
236 struct virtio_scsi_cmd *cmd,
237 unsigned *out_num, unsigned *in_num,
238 size_t req_size, size_t resp_size)
239{
240 struct scsi_cmnd *sc = cmd->sc;
241 struct scatterlist *sg = vscsi->sg;
242 unsigned int idx = 0;
243
244 if (sc) {
245 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
246 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
247
248 /* TODO: check feature bit and fail if unsupported? */
249 BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
250 }
251
252 /* Request header. */
253 sg_set_buf(&sg[idx++], &cmd->req, req_size);
254
255 /* Data-out buffer. */
256 if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
257 virtscsi_map_sgl(sg, &idx, scsi_out(sc));
258
259 *out_num = idx;
260
261 /* Response header. */
262 sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
263
264 /* Data-in buffer */
265 if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
266 virtscsi_map_sgl(sg, &idx, scsi_in(sc));
267
268 *in_num = idx - *out_num;
269}
270
Paolo Bonzini139fe452012-06-13 16:56:32 +0200271static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtio_scsi_vq *vq,
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100272 struct virtio_scsi_cmd *cmd,
273 size_t req_size, size_t resp_size, gfp_t gfp)
274{
275 unsigned int out_num, in_num;
276 unsigned long flags;
277 int ret;
278
Paolo Bonzini139fe452012-06-13 16:56:32 +0200279 spin_lock_irqsave(&vscsi->sg_lock, flags);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100280 virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
281
Paolo Bonzini139fe452012-06-13 16:56:32 +0200282 spin_lock(&vq->vq_lock);
283 ret = virtqueue_add_buf(vq->vq, vscsi->sg, out_num, in_num, cmd, gfp);
Paolo Bonzinibce750b2012-06-13 16:56:33 +0200284 spin_unlock(&vscsi->sg_lock);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100285 if (ret >= 0)
Paolo Bonzini139fe452012-06-13 16:56:32 +0200286 ret = virtqueue_kick_prepare(vq->vq);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100287
Paolo Bonzinibce750b2012-06-13 16:56:33 +0200288 spin_unlock_irqrestore(&vq->vq_lock, flags);
Paolo Bonzini139fe452012-06-13 16:56:32 +0200289
Paolo Bonzinib5ee8f22012-06-13 16:56:31 +0200290 if (ret > 0)
Paolo Bonzini139fe452012-06-13 16:56:32 +0200291 virtqueue_notify(vq->vq);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100292 return ret;
293}
294
295static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
296{
297 struct virtio_scsi *vscsi = shost_priv(sh);
298 struct virtio_scsi_cmd *cmd;
299 int ret;
300
301 dev_dbg(&sc->device->sdev_gendev,
302 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
303
304 ret = SCSI_MLQUEUE_HOST_BUSY;
305 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
306 if (!cmd)
307 goto out;
308
309 memset(cmd, 0, sizeof(*cmd));
310 cmd->sc = sc;
311 cmd->req.cmd = (struct virtio_scsi_cmd_req){
312 .lun[0] = 1,
313 .lun[1] = sc->device->id,
314 .lun[2] = (sc->device->lun >> 8) | 0x40,
315 .lun[3] = sc->device->lun & 0xff,
316 .tag = (unsigned long)sc,
317 .task_attr = VIRTIO_SCSI_S_SIMPLE,
318 .prio = 0,
319 .crn = 0,
320 };
321
322 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
323 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
324
Paolo Bonzini139fe452012-06-13 16:56:32 +0200325 if (virtscsi_kick_cmd(vscsi, &vscsi->req_vq, cmd,
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100326 sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
327 GFP_ATOMIC) >= 0)
328 ret = 0;
329
330out:
331 return ret;
332}
333
334static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
335{
336 DECLARE_COMPLETION_ONSTACK(comp);
Paolo Bonzinie4594bb2012-05-04 12:32:04 +0200337 int ret = FAILED;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100338
339 cmd->comp = &comp;
Paolo Bonzini139fe452012-06-13 16:56:32 +0200340 if (virtscsi_kick_cmd(vscsi, &vscsi->ctrl_vq, cmd,
Paolo Bonzinie4594bb2012-05-04 12:32:04 +0200341 sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
342 GFP_NOIO) < 0)
343 goto out;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100344
345 wait_for_completion(&comp);
Paolo Bonzinie4594bb2012-05-04 12:32:04 +0200346 if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
347 cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
348 ret = SUCCESS;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100349
Paolo Bonzinie4594bb2012-05-04 12:32:04 +0200350out:
351 mempool_free(cmd, virtscsi_cmd_pool);
352 return ret;
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100353}
354
355static int virtscsi_device_reset(struct scsi_cmnd *sc)
356{
357 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
358 struct virtio_scsi_cmd *cmd;
359
360 sdev_printk(KERN_INFO, sc->device, "device reset\n");
361 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
362 if (!cmd)
363 return FAILED;
364
365 memset(cmd, 0, sizeof(*cmd));
366 cmd->sc = sc;
367 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
368 .type = VIRTIO_SCSI_T_TMF,
369 .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
370 .lun[0] = 1,
371 .lun[1] = sc->device->id,
372 .lun[2] = (sc->device->lun >> 8) | 0x40,
373 .lun[3] = sc->device->lun & 0xff,
374 };
375 return virtscsi_tmf(vscsi, cmd);
376}
377
378static int virtscsi_abort(struct scsi_cmnd *sc)
379{
380 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
381 struct virtio_scsi_cmd *cmd;
382
383 scmd_printk(KERN_INFO, sc, "abort\n");
384 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
385 if (!cmd)
386 return FAILED;
387
388 memset(cmd, 0, sizeof(*cmd));
389 cmd->sc = sc;
390 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
391 .type = VIRTIO_SCSI_T_TMF,
392 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
393 .lun[0] = 1,
394 .lun[1] = sc->device->id,
395 .lun[2] = (sc->device->lun >> 8) | 0x40,
396 .lun[3] = sc->device->lun & 0xff,
397 .tag = (unsigned long)sc,
398 };
399 return virtscsi_tmf(vscsi, cmd);
400}
401
402static struct scsi_host_template virtscsi_host_template = {
403 .module = THIS_MODULE,
404 .name = "Virtio SCSI HBA",
405 .proc_name = "virtio_scsi",
406 .queuecommand = virtscsi_queuecommand,
407 .this_id = -1,
408 .eh_abort_handler = virtscsi_abort,
409 .eh_device_reset_handler = virtscsi_device_reset,
410
411 .can_queue = 1024,
412 .dma_boundary = UINT_MAX,
413 .use_clustering = ENABLE_CLUSTERING,
414};
415
416#define virtscsi_config_get(vdev, fld) \
417 ({ \
418 typeof(((struct virtio_scsi_config *)0)->fld) __val; \
419 vdev->config->get(vdev, \
420 offsetof(struct virtio_scsi_config, fld), \
421 &__val, sizeof(__val)); \
422 __val; \
423 })
424
425#define virtscsi_config_set(vdev, fld, val) \
426 (void)({ \
427 typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
428 vdev->config->set(vdev, \
429 offsetof(struct virtio_scsi_config, fld), \
430 &__val, sizeof(__val)); \
431 })
432
Paolo Bonzini139fe452012-06-13 16:56:32 +0200433static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
434 struct virtqueue *vq)
435{
436 spin_lock_init(&virtscsi_vq->vq_lock);
437 virtscsi_vq->vq = vq;
438}
439
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100440static int virtscsi_init(struct virtio_device *vdev,
441 struct virtio_scsi *vscsi)
442{
443 int err;
444 struct virtqueue *vqs[3];
445 vq_callback_t *callbacks[] = {
446 virtscsi_ctrl_done,
447 virtscsi_event_done,
448 virtscsi_req_done
449 };
450 const char *names[] = {
451 "control",
452 "event",
453 "request"
454 };
455
456 /* Discover virtqueues and write information to configuration. */
457 err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
458 if (err)
459 return err;
460
Paolo Bonzini139fe452012-06-13 16:56:32 +0200461 virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
462 virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
463 virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100464
465 virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
466 virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
467 return 0;
468}
469
470static int __devinit virtscsi_probe(struct virtio_device *vdev)
471{
472 struct Scsi_Host *shost;
473 struct virtio_scsi *vscsi;
474 int err;
475 u32 sg_elems;
476 u32 cmd_per_lun;
477
478 /* We need to know how many segments before we allocate.
479 * We need an extra sg elements at head and tail.
480 */
481 sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
482
483 /* Allocate memory and link the structs together. */
484 shost = scsi_host_alloc(&virtscsi_host_template,
485 sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
486
487 if (!shost)
488 return -ENOMEM;
489
490 shost->sg_tablesize = sg_elems;
491 vscsi = shost_priv(shost);
492 vscsi->vdev = vdev;
493 vdev->priv = shost;
494
495 /* Random initializations. */
Paolo Bonzini139fe452012-06-13 16:56:32 +0200496 spin_lock_init(&vscsi->sg_lock);
Paolo Bonzini4fe74b12012-02-05 12:16:00 +0100497 sg_init_table(vscsi->sg, sg_elems + 2);
498
499 err = virtscsi_init(vdev, vscsi);
500 if (err)
501 goto virtscsi_init_failed;
502
503 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
504 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
505 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
506 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
507 shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
508 shost->max_channel = 0;
509 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
510 err = scsi_add_host(shost, &vdev->dev);
511 if (err)
512 goto scsi_add_host_failed;
513
514 scsi_scan_host(shost);
515
516 return 0;
517
518scsi_add_host_failed:
519 vdev->config->del_vqs(vdev);
520virtscsi_init_failed:
521 scsi_host_put(shost);
522 return err;
523}
524
525static void virtscsi_remove_vqs(struct virtio_device *vdev)
526{
527 /* Stop all the virtqueues. */
528 vdev->config->reset(vdev);
529
530 vdev->config->del_vqs(vdev);
531}
532
533static void __devexit virtscsi_remove(struct virtio_device *vdev)
534{
535 struct Scsi_Host *shost = virtio_scsi_host(vdev);
536
537 scsi_remove_host(shost);
538
539 virtscsi_remove_vqs(vdev);
540 scsi_host_put(shost);
541}
542
543#ifdef CONFIG_PM
544static int virtscsi_freeze(struct virtio_device *vdev)
545{
546 virtscsi_remove_vqs(vdev);
547 return 0;
548}
549
550static int virtscsi_restore(struct virtio_device *vdev)
551{
552 struct Scsi_Host *sh = virtio_scsi_host(vdev);
553 struct virtio_scsi *vscsi = shost_priv(sh);
554
555 return virtscsi_init(vdev, vscsi);
556}
557#endif
558
559static struct virtio_device_id id_table[] = {
560 { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
561 { 0 },
562};
563
564static struct virtio_driver virtio_scsi_driver = {
565 .driver.name = KBUILD_MODNAME,
566 .driver.owner = THIS_MODULE,
567 .id_table = id_table,
568 .probe = virtscsi_probe,
569#ifdef CONFIG_PM
570 .freeze = virtscsi_freeze,
571 .restore = virtscsi_restore,
572#endif
573 .remove = __devexit_p(virtscsi_remove),
574};
575
576static int __init init(void)
577{
578 int ret = -ENOMEM;
579
580 virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
581 if (!virtscsi_cmd_cache) {
582 printk(KERN_ERR "kmem_cache_create() for "
583 "virtscsi_cmd_cache failed\n");
584 goto error;
585 }
586
587
588 virtscsi_cmd_pool =
589 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
590 virtscsi_cmd_cache);
591 if (!virtscsi_cmd_pool) {
592 printk(KERN_ERR "mempool_create() for"
593 "virtscsi_cmd_pool failed\n");
594 goto error;
595 }
596 ret = register_virtio_driver(&virtio_scsi_driver);
597 if (ret < 0)
598 goto error;
599
600 return 0;
601
602error:
603 if (virtscsi_cmd_pool) {
604 mempool_destroy(virtscsi_cmd_pool);
605 virtscsi_cmd_pool = NULL;
606 }
607 if (virtscsi_cmd_cache) {
608 kmem_cache_destroy(virtscsi_cmd_cache);
609 virtscsi_cmd_cache = NULL;
610 }
611 return ret;
612}
613
614static void __exit fini(void)
615{
616 unregister_virtio_driver(&virtio_scsi_driver);
617 mempool_destroy(virtscsi_cmd_pool);
618 kmem_cache_destroy(virtscsi_cmd_cache);
619}
620module_init(init);
621module_exit(fini);
622
623MODULE_DEVICE_TABLE(virtio, id_table);
624MODULE_DESCRIPTION("Virtio SCSI HBA driver");
625MODULE_LICENSE("GPL");