// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256
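/* Both limits are passed to vhost_dev_init() in vhost_vsock_dev_open()
 * below and are enforced by vhost_exceeds_weight() at the bottom of each
 * virtqueue handler loop.
 */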

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];
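	/* vqs[] above is indexed by VSOCK_VQ_TX/VSOCK_VQ_RX; the names are
	 * from the guest's point of view, so the "tx" vq carries guest->host
	 * packets and the "rx" vq carries host->guest packets (see the
	 * handle_kick assignments in vhost_vsock_dev_open()).
	 */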

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}
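
/* A minimal sketch of the RCU reader side, as used by the send and cancel
 * paths below:
 *
 *	rcu_read_lock();
 *	vsock = vhost_vsock_get(guest_cid);
 *	if (vsock)
 *		... vsock stays valid until rcu_read_unlock() ...
 *	rcu_read_unlock();
 */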

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr))
			payload_len = iov_len - sizeof(pkt->hdr);

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);
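
		/* Each fragment is posted with its own copy of the header,
		 * with hdr.len rewritten above to the fragment's payload
		 * size, so the guest sees every buffer as a complete,
		 * self-describing vsock packet; pkt->off below tracks how
		 * much of the payload has been sent so far.
		 */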

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing? The tx vq stalls once
				 * queued_replies reaches tx_vq->num, so
				 * dropping back below that level here
				 * means tx can safely be kicked again.
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

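/* Reconstruct a virtio_vsock_pkt from a guest TX descriptor chain. The
 * chain is expected to consist purely of guest-readable ("out") buffers
 * laid out as a struct virtio_vsock_hdr followed by the payload; "in"
 * buffers are rejected.
 */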
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
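
/* The throttling scheme, in short: every reply queued towards the guest
 * bumps queued_replies, and the tx handler stalls once the count reaches
 * the tx virtqueue size (the check above). Processing resumes when
 * vhost_transport_do_send_pkt() or vhost_transport_cancel_pkt() brings
 * the count back below that level and kicks the tx poll. This bounds how
 * many replies a guest can force the host to buffer.
 */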

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid = vhost_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = vhost_transport_cancel_pkt,

		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size = virtio_transport_set_buffer_size,
		.set_min_buffer_size = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size = virtio_transport_set_max_buffer_size,
		.get_buffer_size = virtio_transport_get_buffer_size,
		.get_min_buffer_size = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies. Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire. This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient. Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
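
/* A sketch of how a VMM might drive these ioctls from userspace; error
 * handling is omitted and the vring setup (VHOST_SET_VRING_NUM,
 * VHOST_SET_VRING_ADDR, VHOST_SET_VRING_KICK/CALL, ...) is elided:
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// any CID above VMADDR_CID_HOST
 *	int start = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	... negotiate features and configure both vrings ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);
 */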

#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vsock_dev_open,
	.release = vhost_vsock_dev_release,
	.llseek = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");