// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets; SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received, and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets reduces the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check the
 * socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection. If it does, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
 * function will also clean up rejected sockets, those that reach the connected
 * state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 * lock_sock(listener);
 * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */
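
/* For illustration only, not part of the driver: a minimal userspace sketch
 * of the lifecycle described above. The CID and port values are arbitrary
 * examples; struct sockaddr_vm and VMADDR_CID_HOST (CID 2, the host) come
 * from <linux/vm_sockets.h>.
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,
 *		.svm_port = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * After a successful connect(2) the socket is TCP_ESTABLISHED from the
 * kernel's point of view and visible to ss(8); a server instead bind(2)s,
 * listen(2)s (TCP_LISTEN) and accept(2)s connected sockets from the
 * listener's accept queue as described above.
 */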
87
88#include <linux/types.h>
Andy Kingd021c342013-02-06 14:23:56 +000089#include <linux/bitops.h>
90#include <linux/cred.h>
91#include <linux/init.h>
92#include <linux/io.h>
93#include <linux/kernel.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010094#include <linux/sched/signal.h>
Andy Kingd021c342013-02-06 14:23:56 +000095#include <linux/kmod.h>
96#include <linux/list.h>
97#include <linux/miscdevice.h>
98#include <linux/module.h>
99#include <linux/mutex.h>
100#include <linux/net.h>
101#include <linux/poll.h>
Lepton Wu8236b082018-12-11 11:12:55 -0800102#include <linux/random.h>
Andy Kingd021c342013-02-06 14:23:56 +0000103#include <linux/skbuff.h>
104#include <linux/smp.h>
105#include <linux/socket.h>
106#include <linux/stddef.h>
107#include <linux/unistd.h>
108#include <linux/wait.h>
109#include <linux/workqueue.h>
110#include <net/sock.h>
Asias He82a54d02013-07-25 17:39:34 +0800111#include <net/af_vsock.h>
Andy Kingd021c342013-02-06 14:23:56 +0000112
113static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
114static void vsock_sk_destruct(struct sock *sk);
115static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
116
117/* Protocol family. */
118static struct proto vsock_proto = {
119 .name = "AF_VSOCK",
120 .owner = THIS_MODULE,
121 .obj_size = sizeof(struct vsock_sock),
122};
123
124/* The default peer timeout indicates how long we will wait for a peer response
125 * to a control message.
126 */
127#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
128
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +0100129#define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256)
130#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
131#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128
132
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100133/* Transport used for host->guest communication */
134static const struct vsock_transport *transport_h2g;
135/* Transport used for guest->host communication */
136static const struct vsock_transport *transport_g2h;
137/* Transport used for DGRAM communication */
138static const struct vsock_transport *transport_dgram;
Stefano Garzarella0e121902019-12-10 11:43:04 +0100139/* Transport used for local communication */
140static const struct vsock_transport *transport_local;
Andy Kingd021c342013-02-06 14:23:56 +0000141static DEFINE_MUTEX(vsock_register_mutex);
142
Andy Kingd021c342013-02-06 14:23:56 +0000143/**** UTILS ****/
144
145/* Each bound VSocket is stored in the bind hash table and each connected
146 * VSocket is stored in the connected hash table.
147 *
148 * Unbound sockets are all put on the same list attached to the end of the hash
149 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
150 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
151 * represents the list that addr hashes to).
152 *
153 * Specifically, we initialize the vsock_bind_table array to a size of
154 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
155 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
156 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
Asias Hea49dd9d2013-06-20 17:20:33 +0800157 * mods with VSOCK_HASH_SIZE to ensure this.
Andy Kingd021c342013-02-06 14:23:56 +0000158 */
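
/* Worked example (illustrative, assuming VSOCK_HASH_SIZE is 251 as defined
 * in <net/af_vsock.h>): a socket bound to port 1024 hashes to bucket
 * 1024 % 251 = 20 and is linked into vsock_bind_table[20], while every
 * unbound socket sits in the extra bucket vsock_bind_table[251], i.e.
 * vsock_unbound_sockets below.
 */
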
#define MAX_PORT_RETRIES 24

#define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
	vsock_remove_bound(vsk);
	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
	if (!transport_local)
		return false;

	if (remote_cid == VMADDR_CID_LOCAL)
		return true;

	if (transport_g2h) {
		return remote_cid == transport_g2h->get_local_cid();
	} else {
		return remote_cid == VMADDR_CID_HOST;
	}
}

static void vsock_deassign_transport(struct vsock_sock *vsk)
{
	if (!vsk->transport)
		return;

	vsk->transport->destruct(vsk);
	module_put(vsk->transport->module);
	vsk->transport = NULL;
}

/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for stream sockets this must be called when vsk->remote_addr is set
 * (e.g. during connect() or when a connection request on a listener
 * socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL, or == g2h->local_cid, or ==
 *    VMADDR_CID_HOST if g2h is not loaded: use the local transport;
 *  - remote CID <= VMADDR_CID_HOST, or h2g is not loaded, or the remote
 *    flags field includes VMADDR_FLAG_TO_HOST: use the guest->host transport;
 *  - remote CID > VMADDR_CID_HOST: use the host->guest transport;
 */
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	const struct vsock_transport *new_transport;
	struct sock *sk = sk_vsock(vsk);
	unsigned int remote_cid = vsk->remote_addr.svm_cid;
	__u8 remote_flags;
	int ret;

	/* If the packet is coming with the source and destination CIDs higher
	 * than VMADDR_CID_HOST, then a vsock channel where all the packets are
	 * forwarded to the host should be established. Then the host will
	 * need to forward the packets to the guest.
	 *
	 * The flag is set on the (listen) receive path (psk is not NULL). On
	 * the connect path the flag can be set by the user space application.
	 */
	if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
	    vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
		vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;

	remote_flags = vsk->remote_addr.svm_flags;

	switch (sk->sk_type) {
	case SOCK_DGRAM:
		new_transport = transport_dgram;
		break;
	case SOCK_STREAM:
		if (vsock_use_local_transport(remote_cid))
			new_transport = transport_local;
		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
			 (remote_flags & VMADDR_FLAG_TO_HOST))
			new_transport = transport_g2h;
		else
			new_transport = transport_h2g;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (vsk->transport) {
		if (vsk->transport == new_transport)
			return 0;

		/* transport->release() must be called with sock lock acquired.
		 * This path can only be taken during vsock_stream_connect(),
		 * where we have already held the sock lock.
		 * In the other cases, this function is called on a new socket
		 * which is not assigned to any transport.
		 */
		vsk->transport->release(vsk);
		vsock_deassign_transport(vsk);
	}

	/* We increase the module refcnt to prevent the transport unloading
	 * while there are open sockets assigned to it.
	 */
	if (!new_transport || !try_module_get(new_transport->module))
		return -ENODEV;

	ret = new_transport->init(vsk, psk);
	if (ret) {
		module_put(new_transport->module);
		return ret;
	}

	vsk->transport = new_transport;

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);
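
/* Illustrative outcomes of the selection above (CID values assumed): in a
 * guest whose g2h transport reports local CID 3, connecting to CID 2
 * (VMADDR_CID_HOST) picks transport_g2h; on the host, connecting to that
 * guest's CID 3 picks transport_h2g; connecting to VMADDR_CID_LOCAL (CID 1),
 * or from the guest to its own CID 3, picks transport_local when loaded.
 */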

bool vsock_find_cid(unsigned int cid)
{
	if (transport_g2h && cid == transport_g2h->get_local_cid())
		return true;

	if (transport_h2g && cid == VMADDR_CID_HOST)
		return true;

	if (transport_local && cid == VMADDR_CID_LOCAL)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (!vsk->transport)
		return -ENODEV;

	return vsk->transport->shutdown(vsk, mode);
}

static void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, pending_work.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		sk_acceptq_removed(listener);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process. We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourselves from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_connectible(struct vsock_sock *vsk,
				    struct sockaddr_vm *addr)
{
	static u32 port;
	struct sockaddr_vm new_addr;

	if (!port)
		port = LAST_RESERVED_PORT + 1 +
			prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the hash
	 * table for easy lookup by their address. The unbound list is simply an
	 * extra entry at the end of the hash table, a trick used by AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return vsk->transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to a local CID.
	 */
	if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_connectible(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

static struct sock *__vsock_create(struct net *net,
				   struct socket *sock,
				   struct sock *parent,
				   gfp_t priority,
				   unsigned short type,
				   int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
		vsk->buffer_size = psk->buffer_size;
		vsk->buffer_min_size = psk->buffer_min_size;
		vsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
		vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
		vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
		vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}

static bool sock_type_connectible(u16 type)
{
	return type == SOCK_STREAM;
}

static void __vsock_release(struct sock *sk, int level)
{
	if (sk) {
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
		 * version to avoid the warning "possible recursive locking
		 * detected". When "level" is 0, lock_sock_nested(sk, level)
		 * is the same as lock_sock(sk).
		 */
		lock_sock_nested(sk, level);

		if (vsk->transport)
			vsk->transport->release(vsk);
		else if (sock_type_connectible(sk->sk_type))
			vsock_remove_sock(vsk);

		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(&sk->sk_receive_queue);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending, SINGLE_DEPTH_NESTING);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	vsock_deassign_transport(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

struct sock *vsock_create_connected(struct sock *parent)
{
	return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
			      parent->sk_type, 0);
}
EXPORT_SYMBOL_GPL(vsock_create_connected);

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do. Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately. If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */

	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sock_type_connectible(sk->sk_type))
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sock_type_connectible(sk->sk_type)) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case where EPOLLHUP is set.
	 */
1000 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
1001 ((sk->sk_shutdown & SEND_SHUTDOWN) &&
1002 (vsk->peer_shutdown & SEND_SHUTDOWN))) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001003 mask |= EPOLLHUP;
Andy Kingd021c342013-02-06 14:23:56 +00001004 }
1005
1006 if (sk->sk_shutdown & RCV_SHUTDOWN ||
1007 vsk->peer_shutdown & SEND_SHUTDOWN) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001008 mask |= EPOLLRDHUP;
Andy Kingd021c342013-02-06 14:23:56 +00001009 }
1010
1011 if (sock->type == SOCK_DGRAM) {
1012 /* For datagram sockets we can read if there is something in
1013 * the queue and write as long as the socket isn't shutdown for
1014 * sending.
1015 */
Eric Dumazet3ef7cf52019-10-23 22:44:50 -07001016 if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
Andy Kingd021c342013-02-06 14:23:56 +00001017 (sk->sk_shutdown & RCV_SHUTDOWN)) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001018 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001019 }
1020
1021 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001022 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
Andy Kingd021c342013-02-06 14:23:56 +00001023
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001024 } else if (sock_type_connectible(sk->sk_type)) {
Alexander Popovc518ada2021-02-01 11:47:19 +03001025 const struct vsock_transport *transport;
1026
Andy Kingd021c342013-02-06 14:23:56 +00001027 lock_sock(sk);
1028
Alexander Popovc518ada2021-02-01 11:47:19 +03001029 transport = vsk->transport;
1030
Andy Kingd021c342013-02-06 14:23:56 +00001031 /* Listening sockets that have connections in their accept
1032 * queue can be read.
1033 */
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001034 if (sk->sk_state == TCP_LISTEN
Andy Kingd021c342013-02-06 14:23:56 +00001035 && !vsock_is_accept_queue_empty(sk))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001036 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001037
1038 /* If there is something in the queue then we can read. */
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001039 if (transport && transport->stream_is_active(vsk) &&
Andy Kingd021c342013-02-06 14:23:56 +00001040 !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1041 bool data_ready_now = false;
1042 int ret = transport->notify_poll_in(
1043 vsk, 1, &data_ready_now);
1044 if (ret < 0) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001045 mask |= EPOLLERR;
Andy Kingd021c342013-02-06 14:23:56 +00001046 } else {
1047 if (data_ready_now)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001048 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001049
1050 }
1051 }
1052
1053 /* Sockets whose connections have been closed, reset, or
1054 * terminated should also be considered read, and we check the
1055 * shutdown flag for that.
1056 */
1057 if (sk->sk_shutdown & RCV_SHUTDOWN ||
1058 vsk->peer_shutdown & SEND_SHUTDOWN) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001059 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001060 }
1061
1062 /* Connected sockets that can produce data can be written. */
Stefano Garzarella1980c052020-08-12 14:56:02 +02001063 if (transport && sk->sk_state == TCP_ESTABLISHED) {
Andy Kingd021c342013-02-06 14:23:56 +00001064 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1065 bool space_avail_now = false;
1066 int ret = transport->notify_poll_out(
1067 vsk, 1, &space_avail_now);
1068 if (ret < 0) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001069 mask |= EPOLLERR;
Andy Kingd021c342013-02-06 14:23:56 +00001070 } else {
1071 if (space_avail_now)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001072 /* Remove EPOLLWRBAND since INET
Andy Kingd021c342013-02-06 14:23:56 +00001073 * sockets are not setting it.
1074 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001075 mask |= EPOLLOUT | EPOLLWRNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001076
1077 }
1078 }
1079 }
1080
1081 /* Simulate INET socket poll behaviors, which sets
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001082 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
Andy Kingd021c342013-02-06 14:23:56 +00001083 * but local send is not shutdown.
1084 */
Stefan Hajnocziba3169f2018-01-26 11:48:25 +00001085 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
Andy Kingd021c342013-02-06 14:23:56 +00001086 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001087 mask |= EPOLLOUT | EPOLLWRNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001088
1089 }
1090
1091 release_sock(sk);
1092 }
1093
1094 return mask;
1095}
1096
Ying Xue1b784142015-03-02 15:37:48 +08001097static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1098 size_t len)
Andy Kingd021c342013-02-06 14:23:56 +00001099{
1100 int err;
1101 struct sock *sk;
1102 struct vsock_sock *vsk;
1103 struct sockaddr_vm *remote_addr;
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001104 const struct vsock_transport *transport;
Andy Kingd021c342013-02-06 14:23:56 +00001105
1106 if (msg->msg_flags & MSG_OOB)
1107 return -EOPNOTSUPP;
1108
1109 /* For now, MSG_DONTWAIT is always assumed... */
1110 err = 0;
1111 sk = sock->sk;
1112 vsk = vsock_sk(sk);
1113
1114 lock_sock(sk);
1115
Alexander Popovc518ada2021-02-01 11:47:19 +03001116 transport = vsk->transport;
1117
Asias Heb3a6dfe2013-06-20 17:20:30 +08001118 err = vsock_auto_bind(vsk);
1119 if (err)
1120 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001121
Andy Kingd021c342013-02-06 14:23:56 +00001122
1123 /* If the provided message contains an address, use that. Otherwise
1124 * fall back on the socket's remote handle (if it has been connected).
1125 */
1126 if (msg->msg_name &&
1127 vsock_addr_cast(msg->msg_name, msg->msg_namelen,
1128 &remote_addr) == 0) {
1129 /* Ensure this address is of the right type and is a valid
1130 * destination.
1131 */
1132
1133 if (remote_addr->svm_cid == VMADDR_CID_ANY)
1134 remote_addr->svm_cid = transport->get_local_cid();
1135
1136 if (!vsock_addr_bound(remote_addr)) {
1137 err = -EINVAL;
1138 goto out;
1139 }
1140 } else if (sock->state == SS_CONNECTED) {
1141 remote_addr = &vsk->remote_addr;
1142
1143 if (remote_addr->svm_cid == VMADDR_CID_ANY)
1144 remote_addr->svm_cid = transport->get_local_cid();
1145
1146 /* XXX Should connect() or this function ensure remote_addr is
1147 * bound?
1148 */
1149 if (!vsock_addr_bound(&vsk->remote_addr)) {
1150 err = -EINVAL;
1151 goto out;
1152 }
1153 } else {
1154 err = -EINVAL;
1155 goto out;
1156 }
1157
1158 if (!transport->dgram_allow(remote_addr->svm_cid,
1159 remote_addr->svm_port)) {
1160 err = -EINVAL;
1161 goto out;
1162 }
1163
Al Viro0f7db232014-11-20 04:05:34 -05001164 err = transport->dgram_enqueue(vsk, remote_addr, msg, len);
Andy Kingd021c342013-02-06 14:23:56 +00001165
1166out:
1167 release_sock(sk);
1168 return err;
1169}
1170
1171static int vsock_dgram_connect(struct socket *sock,
1172 struct sockaddr *addr, int addr_len, int flags)
1173{
1174 int err;
1175 struct sock *sk;
1176 struct vsock_sock *vsk;
1177 struct sockaddr_vm *remote_addr;
1178
1179 sk = sock->sk;
1180 vsk = vsock_sk(sk);
1181
1182 err = vsock_addr_cast(addr, addr_len, &remote_addr);
1183 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
1184 lock_sock(sk);
1185 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
1186 VMADDR_PORT_ANY);
1187 sock->state = SS_UNCONNECTED;
1188 release_sock(sk);
1189 return 0;
1190 } else if (err != 0)
1191 return -EINVAL;
1192
1193 lock_sock(sk);
1194
Asias Heb3a6dfe2013-06-20 17:20:30 +08001195 err = vsock_auto_bind(vsk);
1196 if (err)
1197 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001198
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001199 if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
1200 remote_addr->svm_port)) {
Andy Kingd021c342013-02-06 14:23:56 +00001201 err = -EINVAL;
1202 goto out;
1203 }
1204
1205 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
1206 sock->state = SS_CONNECTED;
1207
1208out:
1209 release_sock(sk);
1210 return err;
1211}
1212
Ying Xue1b784142015-03-02 15:37:48 +08001213static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1214 size_t len, int flags)
Andy Kingd021c342013-02-06 14:23:56 +00001215{
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001216 struct vsock_sock *vsk = vsock_sk(sock->sk);
1217
1218 return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
Andy Kingd021c342013-02-06 14:23:56 +00001219}
1220
1221static const struct proto_ops vsock_dgram_ops = {
1222 .family = PF_VSOCK,
1223 .owner = THIS_MODULE,
1224 .release = vsock_release,
1225 .bind = vsock_bind,
1226 .connect = vsock_dgram_connect,
1227 .socketpair = sock_no_socketpair,
1228 .accept = sock_no_accept,
1229 .getname = vsock_getname,
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001230 .poll = vsock_poll,
Andy Kingd021c342013-02-06 14:23:56 +00001231 .ioctl = sock_no_ioctl,
1232 .listen = sock_no_listen,
1233 .shutdown = vsock_shutdown,
Andy Kingd021c342013-02-06 14:23:56 +00001234 .sendmsg = vsock_dgram_sendmsg,
1235 .recvmsg = vsock_dgram_recvmsg,
1236 .mmap = sock_no_mmap,
1237 .sendpage = sock_no_sendpage,
1238};
1239
Peng Tao380feae2017-03-15 09:32:17 +08001240static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1241{
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001242 const struct vsock_transport *transport = vsk->transport;
1243
Norbert Slusarek5d1cbcc2021-02-05 13:12:06 +01001244 if (!transport || !transport->cancel_pkt)
Peng Tao380feae2017-03-15 09:32:17 +08001245 return -EOPNOTSUPP;
1246
1247 return transport->cancel_pkt(vsk);
1248}
1249
Andy Kingd021c342013-02-06 14:23:56 +00001250static void vsock_connect_timeout(struct work_struct *work)
1251{
1252 struct sock *sk;
1253 struct vsock_sock *vsk;
1254
Cong Wang455f05e2018-08-06 11:06:02 -07001255 vsk = container_of(work, struct vsock_sock, connect_work.work);
Andy Kingd021c342013-02-06 14:23:56 +00001256 sk = sk_vsock(vsk);
1257
1258 lock_sock(sk);
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001259 if (sk->sk_state == TCP_SYN_SENT &&
Andy Kingd021c342013-02-06 14:23:56 +00001260 (sk->sk_shutdown != SHUTDOWN_MASK)) {
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001261 sk->sk_state = TCP_CLOSE;
Andy Kingd021c342013-02-06 14:23:56 +00001262 sk->sk_err = ETIMEDOUT;
1263 sk->sk_error_report(sk);
Norbert Slusarek3d0bc442021-02-05 13:14:05 +01001264 vsock_transport_cancel_pkt(vsk);
Andy Kingd021c342013-02-06 14:23:56 +00001265 }
1266 release_sock(sk);
1267
1268 sock_put(sk);
1269}
1270
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001271static int vsock_connect(struct socket *sock, struct sockaddr *addr,
1272 int addr_len, int flags)
Andy Kingd021c342013-02-06 14:23:56 +00001273{
1274 int err;
1275 struct sock *sk;
1276 struct vsock_sock *vsk;
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001277 const struct vsock_transport *transport;
Andy Kingd021c342013-02-06 14:23:56 +00001278 struct sockaddr_vm *remote_addr;
1279 long timeout;
1280 DEFINE_WAIT(wait);
1281
1282 err = 0;
1283 sk = sock->sk;
1284 vsk = vsock_sk(sk);
1285
1286 lock_sock(sk);
1287
1288 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1289 switch (sock->state) {
1290 case SS_CONNECTED:
1291 err = -EISCONN;
1292 goto out;
1293 case SS_DISCONNECTING:
1294 err = -EINVAL;
1295 goto out;
1296 case SS_CONNECTING:
1297 /* This continues on so we can move sock into the SS_CONNECTED
1298 * state once the connection has completed (at which point err
1299 * will be set to zero also). Otherwise, we will either wait
1300 * for the connection or return -EALREADY should this be a
1301 * non-blocking call.
1302 */
1303 err = -EALREADY;
1304 break;
1305 default:
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001306 if ((sk->sk_state == TCP_LISTEN) ||
Andy Kingd021c342013-02-06 14:23:56 +00001307 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1308 err = -EINVAL;
1309 goto out;
1310 }
1311
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001312 /* Set the remote address that we are connecting to. */
1313 memcpy(&vsk->remote_addr, remote_addr,
1314 sizeof(vsk->remote_addr));
1315
1316 err = vsock_assign_transport(vsk, NULL);
1317 if (err)
1318 goto out;
1319
1320 transport = vsk->transport;
1321
Andy Kingd021c342013-02-06 14:23:56 +00001322 /* The hypervisor and well-known contexts do not have socket
1323 * endpoints.
1324 */
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001325 if (!transport ||
1326 !transport->stream_allow(remote_addr->svm_cid,
Andy Kingd021c342013-02-06 14:23:56 +00001327 remote_addr->svm_port)) {
1328 err = -ENETUNREACH;
1329 goto out;
1330 }
1331
Asias Heb3a6dfe2013-06-20 17:20:30 +08001332 err = vsock_auto_bind(vsk);
1333 if (err)
1334 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001335
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001336 sk->sk_state = TCP_SYN_SENT;
Andy Kingd021c342013-02-06 14:23:56 +00001337
1338 err = transport->connect(vsk);
1339 if (err < 0)
1340 goto out;
1341
1342 /* Mark sock as connecting and set the error code to in
1343 * progress in case this is a non-blocking connect.
1344 */
1345 sock->state = SS_CONNECTING;
1346 err = -EINPROGRESS;
1347 }
1348
1349 /* The receive path will handle all communication until we are able to
1350 * enter the connected state. Here we wait for the connection to be
1351 * completed or a notification of an error.
1352 */
1353 timeout = vsk->connect_timeout;
1354 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1355
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001356 while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
Andy Kingd021c342013-02-06 14:23:56 +00001357 if (flags & O_NONBLOCK) {
1358 /* If we're not going to block, we schedule a timeout
1359 * function to generate a timeout on the connection
1360 * attempt, in case the peer doesn't respond in a
1361 * timely manner. We hold on to the socket until the
1362 * timeout fires.
1363 */
1364 sock_hold(sk);
Cong Wang455f05e2018-08-06 11:06:02 -07001365 schedule_delayed_work(&vsk->connect_work, timeout);
Andy Kingd021c342013-02-06 14:23:56 +00001366
1367 /* Skip ahead to preserve error code set above. */
1368 goto out_wait;
1369 }
1370
1371 release_sock(sk);
1372 timeout = schedule_timeout(timeout);
1373 lock_sock(sk);
1374
1375 if (signal_pending(current)) {
1376 err = sock_intr_errno(timeout);
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001377 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001378 sock->state = SS_UNCONNECTED;
Peng Tao380feae2017-03-15 09:32:17 +08001379 vsock_transport_cancel_pkt(vsk);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001380 goto out_wait;
Andy Kingd021c342013-02-06 14:23:56 +00001381 } else if (timeout == 0) {
1382 err = -ETIMEDOUT;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001383 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001384 sock->state = SS_UNCONNECTED;
Peng Tao380feae2017-03-15 09:32:17 +08001385 vsock_transport_cancel_pkt(vsk);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001386 goto out_wait;
Andy Kingd021c342013-02-06 14:23:56 +00001387 }
1388
1389 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1390 }
1391
1392 if (sk->sk_err) {
1393 err = -sk->sk_err;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001394 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001395 sock->state = SS_UNCONNECTED;
1396 } else {
Andy Kingd021c342013-02-06 14:23:56 +00001397 err = 0;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001398 }
Andy Kingd021c342013-02-06 14:23:56 +00001399
1400out_wait:
1401 finish_wait(sk_sleep(sk), &wait);
1402out:
1403 release_sock(sk);
1404 return err;
Andy Kingd021c342013-02-06 14:23:56 +00001405}
1406
David Howellscdfbabf2017-03-09 08:09:05 +00001407static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
1408 bool kern)
Andy Kingd021c342013-02-06 14:23:56 +00001409{
1410 struct sock *listener;
1411 int err;
1412 struct sock *connected;
1413 struct vsock_sock *vconnected;
1414 long timeout;
1415 DEFINE_WAIT(wait);
1416
1417 err = 0;
1418 listener = sock->sk;
1419
1420 lock_sock(listener);
1421
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001422 if (!sock_type_connectible(sock->type)) {
Andy Kingd021c342013-02-06 14:23:56 +00001423 err = -EOPNOTSUPP;
1424 goto out;
1425 }
1426
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001427 if (listener->sk_state != TCP_LISTEN) {
Andy Kingd021c342013-02-06 14:23:56 +00001428 err = -EINVAL;
1429 goto out;
1430 }
1431
1432 /* Wait for children sockets to appear; these are the new sockets
1433 * created upon connection establishment.
1434 */
Stefano Garzarella7e0afbd2020-05-27 09:56:55 +02001435 timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
Andy Kingd021c342013-02-06 14:23:56 +00001436 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1437
1438 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1439 listener->sk_err == 0) {
1440 release_sock(listener);
1441 timeout = schedule_timeout(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001442 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001443 lock_sock(listener);
1444
1445 if (signal_pending(current)) {
1446 err = sock_intr_errno(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001447 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001448 } else if (timeout == 0) {
1449 err = -EAGAIN;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001450 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001451 }
1452
1453 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1454 }
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001455 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001456
1457 if (listener->sk_err)
1458 err = -listener->sk_err;
1459
1460 if (connected) {
Eric Dumazet7976a112019-11-05 14:11:52 -08001461 sk_acceptq_removed(listener);
Andy Kingd021c342013-02-06 14:23:56 +00001462
Stefan Hajnoczi4192f672016-06-23 16:28:58 +01001463 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
Andy Kingd021c342013-02-06 14:23:56 +00001464 vconnected = vsock_sk(connected);
1465
1466 /* If the listener socket has received an error, then we should
1467 * reject this socket and return. Note that we simply mark the
1468 * socket rejected, drop our reference, and let the cleanup
1469 * function handle the cleanup; the fact that we found it in
1470 * the listener's accept queue guarantees that the cleanup
1471 * function hasn't run yet.
1472 */
1473 if (err) {
1474 vconnected->rejected = true;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001475 } else {
1476 newsock->state = SS_CONNECTED;
1477 sock_graft(connected, newsock);
Andy Kingd021c342013-02-06 14:23:56 +00001478 }
1479
Andy Kingd021c342013-02-06 14:23:56 +00001480 release_sock(connected);
1481 sock_put(connected);
1482 }
1483
Andy Kingd021c342013-02-06 14:23:56 +00001484out:
1485 release_sock(listener);
1486 return err;
1487}

static int vsock_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;

	sk = sock->sk;

	lock_sock(sk);

	if (!sock_type_connectible(sk->sk_type)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = TCP_LISTEN;

	err = 0;

out:
	release_sock(sk);
	return err;
}
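
/* Usage sketch for the listen/accept path above (not part of this
 * file): the matching userspace server side. Note that bind() must
 * precede listen(), mirroring the vsock_addr_bound() check; port 1234
 * is an arbitrary example and error handling is elided.
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_ANY,
 *		.svm_port = 1234,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);
 *	int peer = accept(fd, NULL, NULL);
 */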

static void vsock_update_buffer_size(struct vsock_sock *vsk,
				     const struct vsock_transport *transport,
				     u64 val)
{
	if (val > vsk->buffer_max_size)
		val = vsk->buffer_max_size;

	if (val < vsk->buffer_min_size)
		val = vsk->buffer_min_size;

	if (val != vsk->buffer_size &&
	    transport && transport->notify_buffer_size)
		transport->notify_buffer_size(vsk, &val);

	vsk->buffer_size = val;
}
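
/* Worked example of the clamping above (illustrative values, not
 * defaults): with buffer_min_size = 4 KiB and buffer_max_size = 256 KiB,
 * a requested size of 1 MiB is stored as 256 KiB and a request of 1 KiB
 * as 4 KiB; the transport's notify_buffer_size() callback runs only
 * when the stored value actually changes.
 */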

static int vsock_connectible_setsockopt(struct socket *sock,
					int level,
					int optname,
					sockptr_t optval,
					unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

#define COPY_IN(_v)       \
	do {		  \
		if (optlen < sizeof(_v)) {	\
			err = -EINVAL;		\
			goto exit;		\
		}				\
		if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;					\
			goto exit;					\
		}							\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		vsock_update_buffer_size(vsk, transport, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		vsk->buffer_max_size = val;
		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		vsk->buffer_min_size = val;
		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct __kernel_old_timeval tv;
		COPY_IN(tv);
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
				DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
				    VSOCK_DEFAULT_CONNECT_TIMEOUT;

		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}
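
/* Usage sketch for the options above (not part of this file): setting
 * the connect timeout and buffer size from userspace. Note the option
 * level is AF_VSOCK; the values are illustrative only.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	unsigned long long buf = 256 * 1024;
 *
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *		   &tv, sizeof(tv));
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *		   &buf, sizeof(buf));
 */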

static int vsock_connectible_getsockopt(struct socket *sock,
					int level, int optname,
					char __user *optval,
					int __user *optlen)
{
	int err;
	int len;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	err = get_user(len, optlen);
	if (err != 0)
		return err;

#define COPY_OUT(_v)       \
	do {		   \
		if (len < sizeof(_v))	\
			return -EINVAL;	\
					\
		len = sizeof(_v);	\
		if (copy_to_user(optval, &_v, len) != 0)	\
			return -EFAULT;				\
								\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		val = vsk->buffer_size;
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		val = vsk->buffer_max_size;
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		val = vsk->buffer_min_size;
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct __kernel_old_timeval tv;
		tv.tv_sec = vsk->connect_timeout / HZ;
		tv.tv_usec =
		    (vsk->connect_timeout -
		     tv.tv_sec * HZ) * (1000000 / HZ);
		COPY_OUT(tv);
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	err = put_user(len, optlen);
	if (err != 0)
		return -EFAULT;

#undef COPY_OUT

	return 0;
}
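
/* Usage sketch mirroring the setsockopt example above: reading an
 * option back from userspace (fd is assumed to be a vsock socket).
 *
 *	unsigned long long buf;
 *	socklen_t len = sizeof(buf);
 *
 *	if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *		       &buf, &len) == 0)
 *		printf("buffer size: %llu\n", buf);
 */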

static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
				     size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	transport = vsk->transport;

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if neither side has shut down in this direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (!transport || sk->sk_state != TCP_ESTABLISHED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		add_wait_queue(sk_sleep(sk), &wait);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}
		}
		remove_wait_queue(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		written = transport->stream_enqueue(
				vsk, msg,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;

	}

out_err:
	if (total_written > 0)
		err = total_written;
out:
	release_sock(sk);
	return err;
}
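
/* As the enqueue comment above notes, a send may complete with fewer
 * bytes than requested (e.g. after a signal). A minimal userspace loop
 * that handles short writes (illustrative sketch only):
 *
 *	size_t off = 0;
 *
 *	while (off < len) {
 *		ssize_t n = send(fd, buf + off, len - off, 0);
 *
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		off += n;
 *	}
 */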

static int
vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	transport = vsk->transport;

	if (!transport || sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown. Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	while (1) {
		s64 ready;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		ready = vsock_stream_has_data(vsk);

		if (ready == 0) {
			if (sk->sk_err != 0 ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
		} else {
			ssize_t read;

			finish_wait(sk_sleep(sk), &wait);

			if (ready < 0) {
				/* Invalid queue pair content. XXX This should
				 * be changed to a connection reset in a later
				 * change.
				 */

				err = -ENOMEM;
				goto out;
			}

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	release_sock(sk);
	return err;
}
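
/* Usage sketch for the receive path above (not part of this file): a
 * typical userspace read loop. A return of 0 signals an orderly peer
 * shutdown, as implemented above; consume() is a hypothetical
 * placeholder for application logic.
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
 *		consume(buf, n);
 */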

static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_connectible_setsockopt,
	.getsockopt = vsock_connectible_getsockopt,
	.sendmsg = vsock_connectible_sendmsg,
	.recvmsg = vsock_connectible_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	struct vsock_sock *vsk;
	struct sock *sk;
	int ret;

	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
	if (!sk)
		return -ENOMEM;

	vsk = vsock_sk(sk);

	if (sock->type == SOCK_DGRAM) {
		ret = vsock_assign_transport(vsk, NULL);
		if (ret < 0) {
			sock_put(sk);
			return ret;
		}
	}

	vsock_insert_unbound(vsk);

	return 0;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	u32 cid = VMADDR_CID_ANY;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		/* To be compatible with the VMCI behavior, we prioritize the
		 * guest CID instead of the well-known host CID
		 * (VMADDR_CID_HOST).
		 */
		if (transport_g2h)
			cid = transport_g2h->get_local_cid();
		else if (transport_h2g)
			cid = transport_h2g->get_local_cid();

		if (put_user(cid, p) != 0)
			retval = -EFAULT;
		break;

	default:
		retval = -ENOIOCTLCMD;
	}

	return retval;
}
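
/* Usage sketch for the ioctl above (not part of this file): querying
 * the local CID from userspace through the misc device registered
 * below, assuming <fcntl.h>, <sys/ioctl.h> and <linux/vm_sockets.h>.
 *
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	unsigned int cid;
 *
 *	if (fd >= 0) {
 *		if (ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) == 0)
 *			printf("local CID: %u\n", cid);
 *		close(fd);
 *	}
 */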

static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vsock_dev_compat_ioctl,
#endif
	.open		= nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name		= "vsock",
	.fops		= &vsock_device_ops,
};

static int __init vsock_init(void)
{
	int err = 0;

	vsock_init_tables();

	vsock_proto.owner = THIS_MODULE;
	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_deregister_misc;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_deregister_misc:
	misc_deregister(&vsock_device);
err_reset_transport:
	return err;
}

static void __exit vsock_exit(void)
{
	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);
}

const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
{
	return vsk->transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);

int vsock_core_register(const struct vsock_transport *t, int features)
{
	const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

	t_h2g = transport_h2g;
	t_g2h = transport_g2h;
	t_dgram = transport_dgram;
	t_local = transport_local;

	if (features & VSOCK_TRANSPORT_F_H2G) {
		if (t_h2g) {
			err = -EBUSY;
			goto err_busy;
		}
		t_h2g = t;
	}

	if (features & VSOCK_TRANSPORT_F_G2H) {
		if (t_g2h) {
			err = -EBUSY;
			goto err_busy;
		}
		t_g2h = t;
	}

	if (features & VSOCK_TRANSPORT_F_DGRAM) {
		if (t_dgram) {
			err = -EBUSY;
			goto err_busy;
		}
		t_dgram = t;
	}

	if (features & VSOCK_TRANSPORT_F_LOCAL) {
		if (t_local) {
			err = -EBUSY;
			goto err_busy;
		}
		t_local = t;
	}

	transport_h2g = t_h2g;
	transport_g2h = t_g2h;
	transport_dgram = t_dgram;
	transport_local = t_local;

err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vsock_core_register);
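
/* Registration sketch (hedged, not from this file): how a transport
 * module might register with the core. my_transport and
 * my_get_local_cid are hypothetical; a real transport fills in the
 * full struct vsock_transport callback table.
 *
 *	static struct vsock_transport my_transport = {
 *		.get_local_cid = my_get_local_cid,
 *		... remaining callbacks ...
 *	};
 *
 *	err = vsock_core_register(&my_transport, VSOCK_TRANSPORT_F_H2G);
 *
 * vsock_core_register() returns -EBUSY if another transport already
 * owns the requested role, so callers must handle that case.
 */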

void vsock_core_unregister(const struct vsock_transport *t)
{
	mutex_lock(&vsock_register_mutex);

	if (transport_h2g == t)
		transport_h2g = NULL;

	if (transport_g2h == t)
		transport_g2h = NULL;

	if (transport_dgram == t)
		transport_dgram = NULL;

	if (transport_local == t)
		transport_local = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_unregister);

module_init(vsock_init);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");