// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */

struct virtio_vsock {
        struct virtio_device *vdev;
        struct virtqueue *vqs[VSOCK_VQ_MAX];

        /* Virtqueue processing is deferred to a workqueue */
        struct work_struct tx_work;
        struct work_struct rx_work;
        struct work_struct event_work;

        /* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
         * must be accessed with tx_lock held.
         */
        struct mutex tx_lock;
        bool tx_run;

        struct work_struct send_pkt_work;
        spinlock_t send_pkt_list_lock;
        struct list_head send_pkt_list;

        atomic_t queued_replies;

        /* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
         * must be accessed with rx_lock held.
         */
        struct mutex rx_lock;
        bool rx_run;
        int rx_buf_nr;
        int rx_buf_max_nr;

        /* The following fields are protected by event_lock.
         * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
         */
        struct mutex event_lock;
        bool event_run;
        struct virtio_vsock_event event_list[8];

        u32 guest_cid;
};

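/* Return the CID assigned to this guest by the virtio-vsock device, or
 * VMADDR_CID_ANY if no device is attached.
 */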
static u32 virtio_transport_get_local_cid(void)
{
        struct virtio_vsock *vsock;
        u32 ret;

        rcu_read_lock();
        vsock = rcu_dereference(the_virtio_vsock);
        if (!vsock) {
                ret = VMADDR_CID_ANY;
                goto out_rcu;
        }

        ret = vsock->guest_cid;
out_rcu:
        rcu_read_unlock();
        return ret;
}

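/* Worker that moves queued packets from send_pkt_list onto the TX virtqueue
 * and kicks the device.
 */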
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
        struct virtio_vsock *vsock =
                container_of(work, struct virtio_vsock, send_pkt_work);
        struct virtqueue *vq;
        bool added = false;
        bool restart_rx = false;

        mutex_lock(&vsock->tx_lock);

        if (!vsock->tx_run)
                goto out;

        vq = vsock->vqs[VSOCK_VQ_TX];

        for (;;) {
                struct virtio_vsock_pkt *pkt;
                struct scatterlist hdr, buf, *sgs[2];
                int ret, in_sg = 0, out_sg = 0;
                bool reply;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                virtio_transport_deliver_tap_pkt(pkt);

                reply = pkt->reply;

                sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
                sgs[out_sg++] = &hdr;
                if (pkt->buf) {
                        sg_init_one(&buf, pkt->buf, pkt->len);
                        sgs[out_sg++] = &buf;
                }

                ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
                /* Usually this means that there is no more space available in
                 * the vq
                 */
                if (ret < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (reply) {
                        struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
                        int val;

                        val = atomic_dec_return(&vsock->queued_replies);

                        /* Do we now have resources to resume rx processing? */
                        if (val + 1 == virtqueue_get_vring_size(rx_vq))
                                restart_rx = true;
                }

                added = true;
        }

        if (added)
                virtqueue_kick(vq);

out:
        mutex_unlock(&vsock->tx_lock);

        if (restart_rx)
                queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

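/* Queue a packet for transmission and schedule the send worker. Returns the
 * packet length on success or a negative errno on failure.
 */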
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock *vsock;
        int len = pkt->len;

        rcu_read_lock();
        vsock = rcu_dereference(the_virtio_vsock);
        if (!vsock) {
                virtio_transport_free_pkt(pkt);
                len = -ENODEV;
                goto out_rcu;
        }

        if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
                virtio_transport_free_pkt(pkt);
                len = -ENODEV;
                goto out_rcu;
        }

        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
        rcu_read_unlock();
        return len;
}

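/* Drop any packets queued on behalf of @vsk that have not yet been handed to
 * the device.
 */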
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct virtio_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0, ret;
        LIST_HEAD(freeme);

        rcu_read_lock();
        vsock = rcu_dereference(the_virtio_vsock);
        if (!vsock) {
                ret = -ENODEV;
                goto out_rcu;
        }

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
                        cnt++;
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        if (cnt) {
                struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
                    new_cnt < virtqueue_get_vring_size(rx_vq))
                        queue_work(virtio_vsock_workqueue, &vsock->rx_work);
        }

        ret = 0;

out_rcu:
        rcu_read_unlock();
        return ret;
}

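/* Refill the RX virtqueue with empty buffers for the device to fill in.
 * rx_lock must be held.
 */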
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
        int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
        struct virtio_vsock_pkt *pkt;
        struct scatterlist hdr, buf, *sgs[2];
        struct virtqueue *vq;
        int ret;

        vq = vsock->vqs[VSOCK_VQ_RX];

        do {
                pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
                if (!pkt)
                        break;

                pkt->buf = kmalloc(buf_len, GFP_KERNEL);
                if (!pkt->buf) {
                        virtio_transport_free_pkt(pkt);
                        break;
                }

                pkt->buf_len = buf_len;
                pkt->len = buf_len;

                sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
                sgs[0] = &hdr;

                sg_init_one(&buf, pkt->buf, buf_len);
                sgs[1] = &buf;
                ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
                if (ret) {
                        virtio_transport_free_pkt(pkt);
                        break;
                }
                vsock->rx_buf_nr++;
        } while (vq->num_free);
        if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
                vsock->rx_buf_max_nr = vsock->rx_buf_nr;
        virtqueue_kick(vq);
}

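/* Worker that reclaims and frees buffers the device has finished transmitting */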
static void virtio_transport_tx_work(struct work_struct *work)
{
        struct virtio_vsock *vsock =
                container_of(work, struct virtio_vsock, tx_work);
        struct virtqueue *vq;
        bool added = false;

        vq = vsock->vqs[VSOCK_VQ_TX];
        mutex_lock(&vsock->tx_lock);

        if (!vsock->tx_run)
                goto out;

        do {
                struct virtio_vsock_pkt *pkt;
                unsigned int len;

                virtqueue_disable_cb(vq);
                while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
                        virtio_transport_free_pkt(pkt);
                        added = true;
                }
        } while (!virtqueue_enable_cb(vq));

out:
        mutex_unlock(&vsock->tx_lock);

        if (added)
                queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
        struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
                                       struct virtio_vsock_event *event)
{
        struct scatterlist sg;
        struct virtqueue *vq;

        vq = vsock->vqs[VSOCK_VQ_EVENT];

        sg_init_one(&sg, event, sizeof(*event));

        return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
                struct virtio_vsock_event *event = &vsock->event_list[i];

                virtio_vsock_event_fill_one(vsock, event);
        }

        virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
        lock_sock(sk);
        sk->sk_state = TCP_CLOSE;
        sk->sk_err = ECONNRESET;
        sk->sk_error_report(sk);
        release_sock(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
        struct virtio_device *vdev = vsock->vdev;
        __le64 guest_cid;

        vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
                          &guest_cid, sizeof(guest_cid));
        vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
                                      struct virtio_vsock_event *event)
{
        switch (le32_to_cpu(event->id)) {
        case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
                virtio_vsock_update_guest_cid(vsock);
                vsock_for_each_connected_socket(virtio_vsock_reset_sock);
                break;
        }
}

static void virtio_transport_event_work(struct work_struct *work)
{
        struct virtio_vsock *vsock =
                container_of(work, struct virtio_vsock, event_work);
        struct virtqueue *vq;

        vq = vsock->vqs[VSOCK_VQ_EVENT];

        mutex_lock(&vsock->event_lock);

        if (!vsock->event_run)
                goto out;

        do {
                struct virtio_vsock_event *event;
                unsigned int len;

                virtqueue_disable_cb(vq);
                while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
                        if (len == sizeof(*event))
                                virtio_vsock_event_handle(vsock, event);

                        virtio_vsock_event_fill_one(vsock, event);
                }
        } while (!virtqueue_enable_cb(vq));

        virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
        mutex_unlock(&vsock->event_lock);
}

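/* Virtqueue callbacks; defer all processing to the workqueue */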
static void virtio_vsock_event_done(struct virtqueue *vq)
{
        struct virtio_vsock *vsock = vq->vdev->priv;

        if (!vsock)
                return;
        queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
        struct virtio_vsock *vsock = vq->vdev->priv;

        if (!vsock)
                return;
        queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
        struct virtio_vsock *vsock = vq->vdev->priv;

        if (!vsock)
                return;
        queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static struct virtio_transport virtio_transport = {
        .transport = {
                .module = THIS_MODULE,

                .get_local_cid = virtio_transport_get_local_cid,

                .init = virtio_transport_do_socket_init,
                .destruct = virtio_transport_destruct,
                .release = virtio_transport_release,
                .connect = virtio_transport_connect,
                .shutdown = virtio_transport_shutdown,
                .cancel_pkt = virtio_transport_cancel_pkt,

                .dgram_bind = virtio_transport_dgram_bind,
                .dgram_dequeue = virtio_transport_dgram_dequeue,
                .dgram_enqueue = virtio_transport_dgram_enqueue,
                .dgram_allow = virtio_transport_dgram_allow,

                .stream_dequeue = virtio_transport_stream_dequeue,
                .stream_enqueue = virtio_transport_stream_enqueue,
                .stream_has_data = virtio_transport_stream_has_data,
                .stream_has_space = virtio_transport_stream_has_space,
                .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
                .stream_is_active = virtio_transport_stream_is_active,
                .stream_allow = virtio_transport_stream_allow,

                .notify_poll_in = virtio_transport_notify_poll_in,
                .notify_poll_out = virtio_transport_notify_poll_out,
                .notify_recv_init = virtio_transport_notify_recv_init,
                .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init = virtio_transport_notify_send_init,
                .notify_send_pre_block = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
                .notify_buffer_size = virtio_transport_notify_buffer_size,
        },

        .send_pkt = virtio_transport_send_pkt,
};

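/* Worker that drains the RX virtqueue and hands received packets to the
 * vsock core.
 */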
static void virtio_transport_rx_work(struct work_struct *work)
{
        struct virtio_vsock *vsock =
                container_of(work, struct virtio_vsock, rx_work);
        struct virtqueue *vq;

        vq = vsock->vqs[VSOCK_VQ_RX];

        mutex_lock(&vsock->rx_lock);

        if (!vsock->rx_run)
                goto out;

        do {
                virtqueue_disable_cb(vq);
                for (;;) {
                        struct virtio_vsock_pkt *pkt;
                        unsigned int len;

                        if (!virtio_transport_more_replies(vsock)) {
                                /* Stop rx until the device processes already
                                 * pending replies. Leave rx virtqueue
                                 * callbacks disabled.
                                 */
                                goto out;
                        }

                        pkt = virtqueue_get_buf(vq, &len);
                        if (!pkt)
                                break;

                        vsock->rx_buf_nr--;

                        /* Drop short/long packets */
                        if (unlikely(len < sizeof(pkt->hdr) ||
                                     len > sizeof(pkt->hdr) + pkt->len)) {
                                virtio_transport_free_pkt(pkt);
                                continue;
                        }

                        pkt->len = len - sizeof(pkt->hdr);
                        virtio_transport_deliver_tap_pkt(pkt);
                        virtio_transport_recv_pkt(&virtio_transport, pkt);
                }
        } while (!virtqueue_enable_cb(vq));

out:
        if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
                virtio_vsock_rx_fill(vsock);
        mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
        vq_callback_t *callbacks[] = {
                virtio_vsock_rx_done,
                virtio_vsock_tx_done,
                virtio_vsock_event_done,
        };
        static const char * const names[] = {
                "rx",
                "tx",
                "event",
        };
        struct virtio_vsock *vsock = NULL;
        int ret;

        ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
        if (ret)
                return ret;

        /* Only one virtio-vsock device per guest is supported */
        if (rcu_dereference_protected(the_virtio_vsock,
                                      lockdep_is_held(&the_virtio_vsock_mutex))) {
                ret = -EBUSY;
                goto out;
        }

        vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
        if (!vsock) {
                ret = -ENOMEM;
                goto out;
        }

        vsock->vdev = vdev;

        ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
                              vsock->vqs, callbacks, names,
                              NULL);
        if (ret < 0)
                goto out;

        virtio_vsock_update_guest_cid(vsock);

        vsock->rx_buf_nr = 0;
        vsock->rx_buf_max_nr = 0;
        atomic_set(&vsock->queued_replies, 0);

        mutex_init(&vsock->tx_lock);
        mutex_init(&vsock->rx_lock);
        mutex_init(&vsock->event_lock);
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
        INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
        INIT_WORK(&vsock->event_work, virtio_transport_event_work);
        INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

        mutex_lock(&vsock->tx_lock);
        vsock->tx_run = true;
        mutex_unlock(&vsock->tx_lock);

        mutex_lock(&vsock->rx_lock);
        virtio_vsock_rx_fill(vsock);
        vsock->rx_run = true;
        mutex_unlock(&vsock->rx_lock);

        mutex_lock(&vsock->event_lock);
        virtio_vsock_event_fill(vsock);
        vsock->event_run = true;
        mutex_unlock(&vsock->event_lock);

        vdev->priv = vsock;
        rcu_assign_pointer(the_virtio_vsock, vsock);

        mutex_unlock(&the_virtio_vsock_mutex);
        return 0;

out:
        kfree(vsock);
        mutex_unlock(&the_virtio_vsock_mutex);
        return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
        struct virtio_vsock *vsock = vdev->priv;
        struct virtio_vsock_pkt *pkt;

        mutex_lock(&the_virtio_vsock_mutex);

        vdev->priv = NULL;
        rcu_assign_pointer(the_virtio_vsock, NULL);
        synchronize_rcu();

        /* Reset all connected sockets when the device disappears */
        vsock_for_each_connected_socket(virtio_vsock_reset_sock);

        /* Stop all work handlers to make sure no one is accessing the device,
         * so we can safely call vdev->config->reset().
         */
        mutex_lock(&vsock->rx_lock);
        vsock->rx_run = false;
        mutex_unlock(&vsock->rx_lock);

        mutex_lock(&vsock->tx_lock);
        vsock->tx_run = false;
        mutex_unlock(&vsock->tx_lock);

        mutex_lock(&vsock->event_lock);
        vsock->event_run = false;
        mutex_unlock(&vsock->event_lock);

        /* Flush all device writes and interrupts, device will not use any
         * more buffers.
         */
        vdev->config->reset(vdev);

        mutex_lock(&vsock->rx_lock);
        while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
                virtio_transport_free_pkt(pkt);
        mutex_unlock(&vsock->rx_lock);

        mutex_lock(&vsock->tx_lock);
        while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
                virtio_transport_free_pkt(pkt);
        mutex_unlock(&vsock->tx_lock);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        while (!list_empty(&vsock->send_pkt_list)) {
                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        /* Delete virtqueues and flush outstanding callbacks if any */
        vdev->config->del_vqs(vdev);

        /* Other works can be queued before 'config->del_vqs()', so we flush
         * all works before freeing the vsock object to avoid use after free.
         */
        flush_work(&vsock->rx_work);
        flush_work(&vsock->tx_work);
        flush_work(&vsock->event_work);
        flush_work(&vsock->send_pkt_work);

        mutex_unlock(&the_virtio_vsock_mutex);

        kfree(vsock);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver virtio_vsock_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtio_vsock_probe,
        .remove = virtio_vsock_remove,
};

static int __init virtio_vsock_init(void)
{
        int ret;

        virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
        if (!virtio_vsock_workqueue)
                return -ENOMEM;

        ret = vsock_core_register(&virtio_transport.transport,
                                  VSOCK_TRANSPORT_F_G2H);
        if (ret)
                goto out_wq;

        ret = register_virtio_driver(&virtio_vsock_driver);
        if (ret)
                goto out_vci;

        return 0;

out_vci:
        vsock_core_unregister(&virtio_transport.transport);
out_wq:
        destroy_workqueue(virtio_vsock_workqueue);
        return ret;
}

static void __exit virtio_vsock_exit(void)
{
        unregister_virtio_driver(&virtio_vsock_driver);
        vsock_core_unregister(&virtio_transport.transport);
        destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);