// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256
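/* Both limits are handed to vhost_dev_init() in vhost_vsock_dev_open() and
 * enforced via vhost_exceeds_weight() at the bottom of the virtqueue
 * handling loops below.
 */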

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

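/* Fill the guest's RX virtqueue with packets queued on send_pkt_list.
 *
 * Note that the VSOCK_VQ_* indices are named from the guest's point of view:
 * host->guest traffic is placed on VSOCK_VQ_RX (the guest receives it) and
 * guest->host traffic is consumed from VSOCK_VQ_TX in
 * vhost_vsock_handle_tx_kick().
 */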
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr))
			payload_len = iov_len - sizeof(pkt->hdr);

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

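/* Build a virtio_vsock_pkt from the guest-filled descriptor chain: the
 * little-endian virtio_vsock_hdr is copied first, followed by the payload,
 * if any (hence the le*_to_cpu()/cpu_to_le*() conversions used throughout
 * this file).
 */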
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

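/* Reply throttling: vhost_transport_send_pkt() bumps queued_replies for each
 * pending host->guest reply and vhost_transport_do_send_pkt() drops it once
 * the reply has been pushed to the guest.  The TX kick handler stops draining
 * the guest's TX virtqueue when the counter reaches the ring size, so a guest
 * cannot queue an unbounded number of requests whose replies cannot yet be
 * delivered.
 */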
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};
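
/* Once this transport is registered (see vhost_vsock_init() below), host
 * userspace talks to the guest with ordinary AF_VSOCK sockets.  A minimal
 * host-side sketch, assuming the guest was assigned CID 3 and listens on
 * port 1234 (both values are arbitrary, chosen only for illustration):
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid    = 3,
 *		.svm_port   = 1234,
 *	};
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */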

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

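/* Rough sketch of how a VMM (e.g. QEMU) is expected to drive this device;
 * error handling and the per-virtqueue setup ioctls (VHOST_SET_VRING_NUM,
 * VHOST_SET_VRING_ADDR, VHOST_SET_VRING_KICK, VHOST_SET_VRING_CALL, ...)
 * are omitted for brevity:
 *
 *	int vhost_fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t guest_cid = 3;		// chosen by the VMM
 *	int start = 1;
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost_fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
 *	// ... set up the TX and RX virtqueues ...
 *	ioctl(vhost_fd, VHOST_VSOCK_SET_RUNNING, &start);
 */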
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
};

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
};

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");