// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets, and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for looking up sockets when
 * packets are received; that lookup is not necessary for SOCK_DGRAM sockets,
 * since we create a datagram handle for each and need not perform a lookup.
 * Keeping SOCK_DGRAM sockets out of the bound hash buckets reduces the chance
 * of collisions when looking for SOCK_STREAM sockets and saves us from having
 * to check the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check whether the source of the packet is one with an
 * existing pending connection. If it is, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note that
 * this function also cleans up rejected sockets, those that reach the
 * connected state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 * lock_sock(listener);
 * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) holds a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must increase the reference count to ensure the socket isn't
 * freed before the function is run; the deferred function will then drop the
 * reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 * TCP_CLOSE - unconnected
 * TCP_SYN_SENT - connecting
 * TCP_ESTABLISHED - connected
 * TCP_CLOSING - disconnecting
 * TCP_LISTEN - listening
 */
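
/* For illustration only, not part of this file: a minimal userspace sketch
 * of the client side of the flow described above, using the uapi from
 * <linux/vm_sockets.h>. Error handling is omitted and the port number is
 * arbitrary:
 *
 *	#include <sys/socket.h>
 *	#include <linux/vm_sockets.h>
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,	// CID 2, the host
 *		.svm_port = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The server side is the listener path described above: bind(2) to a local
 * CID/port, listen(2) (moving the socket to TCP_LISTEN), then accept(2) to
 * pop connected sockets off the accept queue.
 */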

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES 24

#define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
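
/* Worked example for the layout above, assuming VSOCK_HASH_SIZE is 251 as
 * currently defined in af_vsock.h: a socket bound to port 1000 hashes to
 * bucket 1000 % 251 == 247, so it lives on vsock_bind_table[247], while
 * every unbound socket sits on vsock_bind_table[251], the extra entry at
 * the end of the table.
 */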

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
	vsock_remove_bound(vsk);
	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
	if (!transport_local)
		return false;

	if (remote_cid == VMADDR_CID_LOCAL)
		return true;

	if (transport_g2h) {
		return remote_cid == transport_g2h->get_local_cid();
	} else {
		return remote_cid == VMADDR_CID_HOST;
	}
}

static void vsock_deassign_transport(struct vsock_sock *vsk)
{
	if (!vsk->transport)
		return;

	vsk->transport->destruct(vsk);
	module_put(vsk->transport->module);
	vsk->transport = NULL;
}

/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for connection-oriented sockets this must be called when
 * vsk->remote_addr is set (e.g. during connect() or when a connection request
 * on a listener socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL, or == g2h->local_cid, or == VMADDR_CID_HOST
 *    if g2h is not loaded, will use the local transport;
 *  - remote CID <= VMADDR_CID_HOST, or h2g is not loaded, or the remote flags
 *    field includes VMADDR_FLAG_TO_HOST, will use the guest->host transport;
 *  - remote CID > VMADDR_CID_HOST will use the host->guest transport;
 */
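
/* Illustrative examples of the rules above, assuming the relevant transports
 * are registered: a guest connecting to VMADDR_CID_HOST (2) gets
 * transport_g2h; a host connecting to a guest CID such as 3 gets
 * transport_h2g; and a remote CID of VMADDR_CID_LOCAL (1), or the guest's
 * own CID, gets transport_local.
 */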
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	const struct vsock_transport *new_transport;
	struct sock *sk = sk_vsock(vsk);
	unsigned int remote_cid = vsk->remote_addr.svm_cid;
	__u8 remote_flags;
	int ret;

	/* If a packet arrives with both the source and destination CIDs higher
	 * than VMADDR_CID_HOST, then a vsock channel should be established in
	 * which all packets are forwarded to the host; the host will then need
	 * to forward the packets to the guest.
	 *
	 * The flag is set on the (listen) receive path (psk is not NULL). On
	 * the connect path the flag can be set by the user space application.
	 */
	if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
	    vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
		vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;

	remote_flags = vsk->remote_addr.svm_flags;

	switch (sk->sk_type) {
	case SOCK_DGRAM:
		new_transport = transport_dgram;
		break;
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (vsock_use_local_transport(remote_cid))
			new_transport = transport_local;
		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
			 (remote_flags & VMADDR_FLAG_TO_HOST))
			new_transport = transport_g2h;
		else
			new_transport = transport_h2g;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (vsk->transport) {
		if (vsk->transport == new_transport)
			return 0;

		/* transport->release() must be called with the sock lock
		 * acquired. This path can only be taken during vsock_connect(),
		 * where we already hold the sock lock. In the other cases, this
		 * function is called on a new socket which is not assigned to
		 * any transport.
		 */
		vsk->transport->release(vsk);
		vsock_deassign_transport(vsk);
	}

	/* We increase the module refcnt to prevent the transport from being
	 * unloaded while there are open sockets assigned to it.
	 */
	if (!new_transport || !try_module_get(new_transport->module))
		return -ENODEV;

	if (sk->sk_type == SOCK_SEQPACKET) {
		if (!new_transport->seqpacket_allow ||
		    !new_transport->seqpacket_allow(remote_cid)) {
			module_put(new_transport->module);
			return -ESOCKTNOSUPPORT;
		}
	}

	ret = new_transport->init(vsk, psk);
	if (ret) {
		module_put(new_transport->module);
		return ret;
	}

	vsk->transport = new_transport;

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);

bool vsock_find_cid(unsigned int cid)
{
	if (transport_g2h && cid == transport_g2h->get_local_cid())
		return true;

	if (transport_h2g && cid == VMADDR_CID_HOST)
		return true;

	if (transport_local && cid == VMADDR_CID_LOCAL)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (!vsk->transport)
		return -ENODEV;

	return vsk->transport->shutdown(vsk, mode);
}

static void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, pending_work.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		sk_acceptq_removed(listener);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process. We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_connectible(struct vsock_sock *vsk,
				    struct sockaddr_vm *addr)
{
	static u32 port;
	struct sockaddr_vm new_addr;

	if (!port)
		port = LAST_RESERVED_PORT + 1 +
			prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove connection-oriented sockets from the unbound list and add
	 * them to the hash table for easy lookup by their address. The
	 * unbound list is simply an extra entry at the end of the hash table,
	 * a trick used by AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return vsk->transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that,
	 * just as AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to a local CID.
	 */
	if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_connectible(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

static struct sock *__vsock_create(struct net *net,
				   struct socket *sock,
				   struct sock *parent,
				   gfp_t priority,
				   unsigned short type,
				   int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
		vsk->buffer_size = psk->buffer_size;
		vsk->buffer_min_size = psk->buffer_min_size;
		vsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
		vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
		vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
		vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}

static bool sock_type_connectible(u16 type)
{
	return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}

static void __vsock_release(struct sock *sk, int level)
{
	if (sk) {
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
		 * version to avoid the warning "possible recursive locking
		 * detected". When "level" is 0, lock_sock_nested(sk, level)
		 * is the same as lock_sock(sk).
		 */
		lock_sock_nested(sk, level);

		if (vsk->transport)
			vsk->transport->release(vsk);
		else if (sock_type_connectible(sk->sk_type))
			vsock_remove_sock(vsk);

		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(&sk->sk_receive_queue);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending, SINGLE_DEPTH_NESTING);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	vsock_deassign_transport(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

struct sock *vsock_create_connected(struct sock *parent)
{
	return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
			      parent->sk_type, 0);
}
EXPORT_SYMBOL_GPL(vsock_create_connected);

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

static s64 vsock_connectible_has_data(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);

	if (sk->sk_type == SOCK_SEQPACKET)
		return vsk->transport->seqpacket_has_data(vsk);
	else
		return vsock_stream_has_data(vsk);
}

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do. Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a connection-oriented socket and it is not connected then
	 * bail out immediately. If it is a DGRAM socket then we must first
	 * kick the socket so that it wakes up from any sleeping calls, for
	 * example recv(), and then afterwards return the error.
	 */

	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sock_type_connectible(sk->sk_type))
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sock_type_connectible(sk->sk_type)) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} else if (sock_type_connectible(sk->sk_type)) {
		const struct vsock_transport *transport;

		lock_sock(sk);

		transport = vsk->transport;

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == TCP_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport && transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= EPOLLERR;
			} else {
				if (data_ready_now)
					mask |= EPOLLIN | EPOLLRDNORM;

			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (transport && sk->sk_state == TCP_ESTABLISHED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= EPOLLERR;
				} else {
					if (space_avail_now)
						/* Remove EPOLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= EPOLLOUT | EPOLLWRNORM;

				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= EPOLLOUT | EPOLLWRNORM;

		}

		release_sock(sk);
	}

	return mask;
}

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	const struct vsock_transport *transport;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	/* If the provided message contains an address, use that. Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
					 remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t len, int flags)
{
	struct vsock_sock *vsk = vsock_sk(sock->sk);

	return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}

static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
	const struct vsock_transport *transport = vsk->transport;

	if (!transport || !transport->cancel_pkt)
		return -EOPNOTSUPP;

	return transport->cancel_pkt(vsk);
}

static void vsock_connect_timeout(struct work_struct *work)
{
	struct sock *sk;
	struct vsock_sock *vsk;

	vsk = container_of(work, struct vsock_sock, connect_work.work);
	sk = sk_vsock(vsk);

	lock_sock(sk);
	if (sk->sk_state == TCP_SYN_SENT &&
	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
		sk->sk_state = TCP_CLOSE;
		sk->sk_err = ETIMEDOUT;
		sk_error_report(sk);
		vsock_transport_cancel_pkt(vsk);
	}
	release_sock(sk);

	sock_put(sk);
}

static int vsock_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also). Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		break;
	default:
		if ((sk->sk_state == TCP_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		err = vsock_assign_transport(vsk, NULL);
		if (err)
			goto out;

		transport = vsk->transport;

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport ||
		    !transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		err = vsock_auto_bind(vsk);
		if (err)
			goto out;

		sk->sk_state = TCP_SYN_SENT;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state. Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner. We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);
			schedule_delayed_work(&vsk->connect_work, timeout);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		} else if (timeout == 0) {
			err = -ETIMEDOUT;
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		sk->sk_state = TCP_CLOSE;
		sock->state = SS_UNCONNECTED;
	} else {
		err = 0;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}

static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
			bool kern)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (!sock_type_connectible(sock->type)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for child sockets to appear; these are the new sockets
	 * created upon connection establishment.
	 */
Stefano Garzarella7e0afbd2020-05-27 09:56:55 +02001456 timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
Andy Kingd021c342013-02-06 14:23:56 +00001457 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1458
1459 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1460 listener->sk_err == 0) {
1461 release_sock(listener);
1462 timeout = schedule_timeout(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001463 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001464 lock_sock(listener);
1465
1466 if (signal_pending(current)) {
1467 err = sock_intr_errno(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001468 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001469 } else if (timeout == 0) {
1470 err = -EAGAIN;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001471 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001472 }
1473
1474 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1475 }
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001476 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001477
1478 if (listener->sk_err)
1479 err = -listener->sk_err;
1480
1481 if (connected) {
Eric Dumazet7976a112019-11-05 14:11:52 -08001482 sk_acceptq_removed(listener);
Andy Kingd021c342013-02-06 14:23:56 +00001483
Stefan Hajnoczi4192f672016-06-23 16:28:58 +01001484 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
Andy Kingd021c342013-02-06 14:23:56 +00001485 vconnected = vsock_sk(connected);
1486
1487 /* If the listener socket has received an error, then we should
1488 * reject this socket and return. Note that we simply mark the
1489 * socket rejected, drop our reference, and let the cleanup
1490 * function handle the cleanup; the fact that we found it in
1491 * the listener's accept queue guarantees that the cleanup
1492 * function hasn't run yet.
1493 */
1494 if (err) {
1495 vconnected->rejected = true;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001496 } else {
1497 newsock->state = SS_CONNECTED;
1498 sock_graft(connected, newsock);
Andy Kingd021c342013-02-06 14:23:56 +00001499 }
1500
Andy Kingd021c342013-02-06 14:23:56 +00001501 release_sock(connected);
1502 sock_put(connected);
1503 }
1504
Andy Kingd021c342013-02-06 14:23:56 +00001505out:
1506 release_sock(listener);
1507 return err;
1508}
1509
1510static int vsock_listen(struct socket *sock, int backlog)
1511{
1512 int err;
1513 struct sock *sk;
1514 struct vsock_sock *vsk;
1515
1516 sk = sock->sk;
1517
1518 lock_sock(sk);
1519
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001520 if (!sock_type_connectible(sk->sk_type)) {
Andy Kingd021c342013-02-06 14:23:56 +00001521 err = -EOPNOTSUPP;
1522 goto out;
1523 }
1524
1525 if (sock->state != SS_UNCONNECTED) {
1526 err = -EINVAL;
1527 goto out;
1528 }
1529
1530 vsk = vsock_sk(sk);
1531
1532 if (!vsock_addr_bound(&vsk->local_addr)) {
1533 err = -EINVAL;
1534 goto out;
1535 }
1536
1537 sk->sk_max_ack_backlog = backlog;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001538 sk->sk_state = TCP_LISTEN;
Andy Kingd021c342013-02-06 14:23:56 +00001539
1540 err = 0;
1541
1542out:
1543 release_sock(sk);
1544 return err;
1545}
1546
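/* Illustrative userspace sketch (not part of this file): the listener side
 * matching vsock_listen() and vsock_accept() above. The port is an arbitrary
 * example; VMADDR_CID_ANY binds to every local CID.
 *
 *	int lfd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid    = VMADDR_CID_ANY,
 *		.svm_port   = 1234,		// hypothetical service port
 *	};
 *
 *	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lfd, 8);				// sk_max_ack_backlog = 8
 *	int cfd = accept(lfd, NULL, NULL);	// blocks in vsock_accept()
 */
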
static void vsock_update_buffer_size(struct vsock_sock *vsk,
				     const struct vsock_transport *transport,
				     u64 val)
{
	if (val > vsk->buffer_max_size)
		val = vsk->buffer_max_size;

	if (val < vsk->buffer_min_size)
		val = vsk->buffer_min_size;

	if (val != vsk->buffer_size &&
	    transport && transport->notify_buffer_size)
		transport->notify_buffer_size(vsk, &val);

	vsk->buffer_size = val;
}

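/* Worked example for vsock_update_buffer_size(), using hypothetical limits:
 * with buffer_min_size = 64 KiB and buffer_max_size = 1 MiB, a request of
 * 4 MiB is clamped down to 1 MiB and a request of 4 KiB is clamped up to
 * 64 KiB, so buffer_min_size <= buffer_size <= buffer_max_size always holds.
 * The transport is notified only when the clamped value differs from the
 * current size.
 */
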
static int vsock_connectible_setsockopt(struct socket *sock,
					int level,
					int optname,
					sockptr_t optval,
					unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

#define COPY_IN(_v)							\
	do {								\
		if (optlen < sizeof(_v)) {				\
			err = -EINVAL;					\
			goto exit;					\
		}							\
		if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;					\
			goto exit;					\
		}							\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		vsock_update_buffer_size(vsk, transport, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		vsk->buffer_max_size = val;
		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		vsk->buffer_min_size = val;
		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
	case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: {
		struct __kernel_sock_timeval tv;

		err = sock_copy_user_timeval(&tv, optval, optlen,
					     optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
		if (err)
			break;
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
				DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
					VSOCK_DEFAULT_CONNECT_TIMEOUT;

		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}

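/* Worked example of the timeout conversion above, assuming HZ = 1000:
 * tv = { .tv_sec = 2, .tv_usec = 500000 } yields
 * 2 * 1000 + DIV_ROUND_UP(500000, 1000000 / 1000) = 2000 + 500 = 2500
 * jiffies, i.e. 2.5 seconds. An illustrative userspace call (values are
 * examples only):
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *		   &tv, sizeof(tv));
 */
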
static int vsock_connectible_getsockopt(struct socket *sock,
					int level, int optname,
					char __user *optval,
					int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct vsock_sock *vsk = vsock_sk(sk);

	union {
		u64 val64;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
	} v;

	int lv = sizeof(v.val64);
	int len;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		v.val64 = vsk->buffer_size;
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		v.val64 = vsk->buffer_max_size;
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		v.val64 = vsk->buffer_min_size;
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
	case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD:
		lv = sock_get_timeout(vsk->connect_timeout, &v,
				      optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len < lv)
		return -EINVAL;
	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

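/* Illustrative userspace read of one of the buffer options above; the values
 * are 64-bit, so a u64-sized buffer is required (a smaller len gets -EINVAL):
 *
 *	unsigned long long size;
 *	socklen_t len = sizeof(size);
 *
 *	getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &size, &len);
 */
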
static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
				     size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	transport = vsk->transport;

	/* Callers should not provide a destination with connection-oriented
	 * sockets.
	 */
	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if neither our send side nor the peer's receive
	 * side has been shut down.
	 */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (!transport || sk->sk_state != TCP_ESTABLISHED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		add_wait_queue(sk_sleep(sk), &wait);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}
		}
		remove_wait_queue(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size. It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		if (sk->sk_type == SOCK_SEQPACKET) {
			written = transport->seqpacket_enqueue(vsk,
						msg, len - total_written);
		} else {
			written = transport->stream_enqueue(vsk,
						msg, len - total_written);
		}
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;

	}

out_err:
	if (total_written > 0) {
		/* Return the number of written bytes only if:
		 * 1) SOCK_STREAM socket.
		 * 2) SOCK_SEQPACKET socket when the whole buffer is sent.
		 */
		if (sk->sk_type == SOCK_STREAM || total_written == len)
			err = total_written;
	}
out:
	release_sock(sk);
	return err;
}

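/* Illustrative userspace pattern (not part of this file) for the partial-write
 * behaviour noted above: on SOCK_STREAM, send() may accept fewer than the
 * requested bytes, so callers typically loop:
 *
 *	size_t off = 0;
 *	while (off < len) {
 *		ssize_t n = send(fd, buf + off, len - off, 0);
 *		if (n < 0)
 *			break;		// e.g. EAGAIN, EPIPE, EINTR
 *		off += n;
 *	}
 */

/* Wait until the socket has data to read, or until an error, shutdown, or
 * timeout intervenes. Returns the number of readable bytes (> 0), 0 when the
 * wait ended on an error or shutdown with no data pending (the caller
 * re-checks sk_err and the shutdown flags), or a negative errno such as
 * -EAGAIN on timeout or -ENOMEM on an internal transport error.
 */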
static int vsock_connectible_wait_data(struct sock *sk,
				       struct wait_queue_entry *wait,
				       long timeout,
				       struct vsock_transport_recv_notify_data *recv_data,
				       size_t target)
{
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;
	s64 data;
	int err;

	vsk = vsock_sk(sk);
	err = 0;
	transport = vsk->transport;

	while ((data = vsock_connectible_has_data(vsk)) == 0) {
		prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err != 0 ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
			break;
		}

		/* Don't wait for non-blocking sockets. */
		if (timeout == 0) {
			err = -EAGAIN;
			break;
		}

		if (recv_data) {
			err = transport->notify_recv_pre_block(vsk, target, recv_data);
			if (err < 0)
				break;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			break;
		} else if (timeout == 0) {
			err = -EAGAIN;
			break;
		}
	}

	finish_wait(sk_sleep(sk), wait);

	if (err)
		return err;

	/* Internal transport error when checking for available
	 * data. XXX This should be changed to a connection
	 * reset in a later change.
	 */
	if (data < 0)
		return -ENOMEM;

	return data;
}

static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
				  size_t len, int flags)
{
	struct vsock_transport_recv_notify_data recv_data;
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;
	ssize_t copied;
	size_t target;
	long timeout;
	int err;

	DEFINE_WAIT(wait);

	vsk = vsock_sk(sk);
	transport = vsk->transport;

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing. Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	while (1) {
		ssize_t read;

		err = vsock_connectible_wait_data(sk, &wait, timeout,
						  &recv_data, target);
		if (err <= 0)
			break;

		err = transport->notify_recv_pre_dequeue(vsk, target,
							 &recv_data);
		if (err < 0)
			break;

		read = transport->stream_dequeue(vsk, msg, len - copied, flags);
		if (read < 0) {
			err = -ENOMEM;
			break;
		}

		copied += read;

		err = transport->notify_recv_post_dequeue(vsk, target, read,
						!(flags & MSG_PEEK), &recv_data);
		if (err < 0)
			goto out;

		if (read >= target || flags & MSG_PEEK)
			break;

		target -= read;
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	return err;
}

static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
				     size_t len, int flags)
{
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;
	ssize_t msg_len;
	long timeout;
	int err = 0;
	DEFINE_WAIT(wait);

	vsk = vsock_sk(sk);
	transport = vsk->transport;

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
	if (err <= 0)
		goto out;

	msg_len = transport->seqpacket_dequeue(vsk, msg, flags);

	if (msg_len < 0) {
		err = -ENOMEM;
		goto out;
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
	} else if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
	} else {
		/* The user set MSG_TRUNC, so return the real length of
		 * the packet.
		 */
		if (flags & MSG_TRUNC)
			err = msg_len;
		else
			err = len - msg_data_left(msg);

		/* Always set MSG_TRUNC if the real length of the packet is
		 * bigger than the user's buffer.
		 */
		if (msg_len > len)
			msg->msg_flags |= MSG_TRUNC;
	}

out:
	return err;
}

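/* Illustrative userspace check for the truncation behaviour above on a
 * SOCK_SEQPACKET socket (the buffer size is an arbitrary example):
 *
 *	char buf[256];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
 *		fprintf(stderr, "record was longer than %zu bytes\n",
 *			sizeof(buf));
 */
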
static int
vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	int err;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	transport = vsk->transport;

	if (!transport || sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown. Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check the peer_shutdown flag here: the peer may have shut
	 * down, yet there can still be data in the queue that the local
	 * socket can receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer. This
	 * is not an error. We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	if (sk->sk_type == SOCK_STREAM)
		err = __vsock_stream_recvmsg(sk, msg, len, flags);
	else
		err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);

out:
	release_sock(sk);
	return err;
}

static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_connectible_setsockopt,
	.getsockopt = vsock_connectible_getsockopt,
	.sendmsg = vsock_connectible_sendmsg,
	.recvmsg = vsock_connectible_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct proto_ops vsock_seqpacket_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_connectible_setsockopt,
	.getsockopt = vsock_connectible_getsockopt,
	.sendmsg = vsock_connectible_sendmsg,
	.recvmsg = vsock_connectible_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	struct vsock_sock *vsk;
	struct sock *sk;
	int ret;

	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &vsock_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
	if (!sk)
		return -ENOMEM;

	vsk = vsock_sk(sk);

	if (sock->type == SOCK_DGRAM) {
		ret = vsock_assign_transport(vsk, NULL);
		if (ret < 0) {
			sock_put(sk);
			return ret;
		}
	}

	vsock_insert_unbound(vsk);

	return 0;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	u32 cid = VMADDR_CID_ANY;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		/* To be compatible with the VMCI behavior, we prioritize the
		 * guest CID over the well-known host CID (VMADDR_CID_HOST).
		 */
		if (transport_g2h)
			cid = transport_g2h->get_local_cid();
		else if (transport_h2g)
			cid = transport_h2g->get_local_cid();

		if (put_user(cid, p) != 0)
			retval = -EFAULT;
		break;

	default:
		retval = -ENOIOCTLCMD;
	}

	return retval;
}

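/* Illustrative userspace use of the ioctl above to discover the local CID
 * (a minimal sketch; error handling omitted):
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 *	close(fd);
 */
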
static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vsock_dev_compat_ioctl,
#endif
	.open = nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name = "vsock",
	.fops = &vsock_device_ops,
};

static int __init vsock_init(void)
{
	int err = 0;

	vsock_init_tables();

	vsock_proto.owner = THIS_MODULE;
	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_deregister_misc;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_deregister_misc:
	misc_deregister(&vsock_device);
err_reset_transport:
	return err;
}

static void __exit vsock_exit(void)
{
	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);
}

const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
{
	return vsk->transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);

int vsock_core_register(const struct vsock_transport *t, int features)
{
	const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

	t_h2g = transport_h2g;
	t_g2h = transport_g2h;
	t_dgram = transport_dgram;
	t_local = transport_local;

	if (features & VSOCK_TRANSPORT_F_H2G) {
		if (t_h2g) {
			err = -EBUSY;
			goto err_busy;
		}
		t_h2g = t;
	}

	if (features & VSOCK_TRANSPORT_F_G2H) {
		if (t_g2h) {
			err = -EBUSY;
			goto err_busy;
		}
		t_g2h = t;
	}

	if (features & VSOCK_TRANSPORT_F_DGRAM) {
		if (t_dgram) {
			err = -EBUSY;
			goto err_busy;
		}
		t_dgram = t;
	}

	if (features & VSOCK_TRANSPORT_F_LOCAL) {
		if (t_local) {
			err = -EBUSY;
			goto err_busy;
		}
		t_local = t;
	}

	transport_h2g = t_h2g;
	transport_g2h = t_g2h;
	transport_dgram = t_dgram;
	transport_local = t_local;

err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vsock_core_register);

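/* Illustrative sketch of how a transport module might register itself; the
 * transport name and feature choice here are hypothetical (in-tree vmci,
 * virtio and hyperv transports do this at module init):
 *
 *	static const struct vsock_transport my_transport = {
 *		// ops filled in by the transport
 *	};
 *
 *	err = vsock_core_register(&my_transport, VSOCK_TRANSPORT_F_H2G);
 *	if (err == -EBUSY)
 *		;	// another host-to-guest transport is registered
 */
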
void vsock_core_unregister(const struct vsock_transport *t)
{
	mutex_lock(&vsock_register_mutex);

	if (transport_h2g == t)
		transport_h2g = NULL;

	if (transport_g2h == t)
		transport_g2h = NULL;

	if (transport_dgram == t)
		transport_dgram = NULL;

	if (transport_local == t)
		transport_local = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_unregister);

module_init(vsock_init);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");