// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

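/* Drain vsock->send_pkt_list into the buffers the guest posted on its RX
 * virtqueue. Runs from the send_pkt_work worker and from the RX kick handler,
 * taking vq->mutex for the duration, and requeues itself once the weight
 * limits (VHOST_VSOCK_WEIGHT / VHOST_VSOCK_PKT_WEIGHT) are exceeded so one
 * virtqueue cannot starve the others.
 */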
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;
		u32 flags_to_restore = 0;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr)) {
			payload_len = iov_len - sizeof(pkt->hdr);

			/* As we are copying pieces of a large packet's buffer
			 * into small rx buffers, the headers of the packets in
			 * the rx queue are created dynamically and are
			 * initialized from the header of the current packet
			 * (except for the length). But for SOCK_SEQPACKET we
			 * must also clear the message delimiter bit
			 * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
			 * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, a whole
			 * sequence of packets would carry these bits. Once the
			 * initialized header has been copied to the rx buffer,
			 * the required bits are restored.
			 */
			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

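/* Queue a packet for transmission to the guest identified by
 * pkt->hdr.dst_cid and kick the send worker. Returns the packet length on
 * success, or -ENODEV if no vhost_vsock instance owns that CID.
 */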
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

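/* Drop all packets queued on behalf of @vsk that have not been handed to the
 * guest yet. If freeing reply packets brings queued_replies back below the
 * TX virtqueue size, TX processing is resumed.
 */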
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

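/* Build a virtio_vsock_pkt from the guest descriptor chain currently at the
 * head of the TX virtqueue. The chain must be output-only (no in buffers) and
 * carry at least a full packet header; the payload, if any, is bounced into a
 * kernel buffer. Returns NULL on malformed input or allocation failure.
 */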
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

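/* Transport operations registered with the AF_VSOCK core. Most callbacks are
 * shared with the virtio guest transport; only the host CID, packet
 * submission/cancellation and the SEQPACKET check are vhost-specific.
 */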
static struct virtio_transport vhost_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = vhost_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = vhost_transport_cancel_pkt,

		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = vhost_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}

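/* Handle a guest kick on the TX virtqueue: pop guest buffers, rebuild the
 * packets and feed them to the core receive path. Processing is throttled by
 * the shared weight limits and paused while too many replies are pending.
 */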
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies. Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

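/* VHOST_VSOCK_SET_RUNNING(1): attach this vhost_vsock as the backend of both
 * virtqueues and kick the send worker, in case packets were queued while the
 * device was stopped.
 */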
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

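/* Open of /dev/vhost-vsock: allocate and initialize a vhost_vsock instance.
 * The new device has no guest CID until userspace issues
 * VHOST_VSOCK_SET_GUEST_CID, so it is not yet reachable via the hash table.
 */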
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire. This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient. Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

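/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID and publish this
 * instance in vhost_vsock_hash. Reserved, already-used and >32-bit CIDs are
 * rejected.
 */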
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

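/* VHOST_SET_FEATURES: accept the feature bits negotiated by userspace,
 * setting up the device IOTLB when VIRTIO_F_ACCESS_PLATFORM was acked and
 * recording whether SOCK_SEQPACKET may be used.
 */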
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev, true))
			goto err;
	}

	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vsock_dev_open,
	.release = vhost_vsock_dev_release,
	.llseek = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.read_iter = vhost_vsock_chr_read_iter,
	.write_iter = vhost_vsock_chr_write_iter,
	.poll = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

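/*
 * A minimal sketch of how a VMM is expected to drive this device (assumed
 * usage, not part of this file; error handling and the vring setup done via
 * the generic VHOST_SET_VRING_* ioctls are omitted):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// hypothetical guest CID
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER);			// bind device to this process
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);	// see vhost_vsock_set_cid()
 *	// ... VHOST_SET_FEATURES, VHOST_SET_VRING_* for both virtqueues ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);	// see vhost_vsock_start()
 */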
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");