blob: 3235261f138db3a583b5e842dbf7c12dafa5dae4 [file] [log] [blame]
Thomas Gleixner685a6bf2019-05-29 16:57:36 -07001// SPDX-License-Identifier: GPL-2.0-only
Andy Kingd021c342013-02-06 14:23:56 +00002/*
3 * VMware vSockets Driver
4 *
5 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
Andy Kingd021c342013-02-06 14:23:56 +00006 */
7
8/* Implementation notes:
9 *
10 * - There are two kinds of sockets: those created by user action (such as
11 * calling socket(2)) and those created by incoming connection request packets.
12 *
13 * - There are two "global" tables, one for bound sockets (sockets that have
14 * specified an address that they are responsible for) and one for connected
15 * sockets (sockets that have established a connection with another socket).
16 * These tables are "global" in that all sockets on the system are placed
17 * within them. - Note, though, that the bound table contains an extra entry
18 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
19 * that list. The bound table is used solely for lookup of sockets when packets
20 * are received and that's not necessary for SOCK_DGRAM sockets since we create
21 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
22 * sockets out of the bound hash buckets will reduce the chance of collisions
23 * when looking for SOCK_STREAM sockets and prevents us from having to check the
24 * socket type in the hash table lookups.
25 *
26 * - Sockets created by user action will either be "client" sockets that
27 * initiate a connection or "server" sockets that listen for connections; we do
28 * not support simultaneous connects (two "client" sockets connecting).
29 *
30 * - "Server" sockets are referred to as listener sockets throughout this
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -040031 * implementation because they are in the TCP_LISTEN state. When a
Stefan Hajnocziea3803c2015-10-29 11:57:42 +000032 * connection request is received (the second kind of socket mentioned above),
33 * we create a new socket and refer to it as a pending socket. These pending
34 * sockets are placed on the pending connection list of the listener socket.
35 * When future packets are received for the address the listener socket is
36 * bound to, we check if the source of the packet is from one that has an
37 * existing pending connection. If it does, we process the packet for the
38 * pending socket. When that socket reaches the connected state, it is removed
39 * from the listener socket's pending list and enqueued in the listener
40 * socket's accept queue. Callers of accept(2) will accept connected sockets
41 * from the listener socket's accept queue. If the socket cannot be accepted
42 * for some reason then it is marked rejected. Once the connection is
43 * accepted, it is owned by the user process and the responsibility for cleanup
44 * falls with that user process.
Andy Kingd021c342013-02-06 14:23:56 +000045 *
46 * - It is possible that these pending sockets will never reach the connected
47 * state; in fact, we may never receive another packet after the connection
48 * request. Because of this, we must schedule a cleanup function to run in the
49 * future, after some amount of time passes where a connection should have been
50 * established. This function ensures that the socket is off all lists so it
51 * cannot be retrieved, then drops all references to the socket so it is cleaned
52 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
53 * function will also cleanup rejected sockets, those that reach the connected
54 * state but leave it before they have been accepted.
55 *
Stefan Hajnoczi4192f672016-06-23 16:28:58 +010056 * - Lock ordering for pending or accept queue sockets is:
57 *
58 * lock_sock(listener);
59 * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
60 *
61 * Using explicit nested locking keeps lockdep happy since normally only one
62 * lock of a given class may be taken at a time.
63 *
Andy Kingd021c342013-02-06 14:23:56 +000064 * - Sockets created by user action will be cleaned up when the user process
65 * calls close(2), causing our release implementation to be called. Our release
66 * implementation will perform some cleanup then drop the last reference so our
67 * sk_destruct implementation is invoked. Our sk_destruct implementation will
68 * perform additional cleanup that's common for both types of sockets.
69 *
70 * - A socket's reference count is what ensures that the structure won't be
71 * freed. Each entry in a list (such as the "global" bound and connected tables
72 * and the listener socket's pending list and connected queue) ensures a
73 * reference. When we defer work until process context and pass a socket as our
74 * argument, we must ensure the reference count is increased to ensure the
75 * socket isn't freed before the function is run; the deferred function will
76 * then drop the reference.
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -040077 *
78 * - sk->sk_state uses the TCP state constants because they are widely used by
79 * other address families and exposed to userspace tools like ss(8):
80 *
81 * TCP_CLOSE - unconnected
82 * TCP_SYN_SENT - connecting
83 * TCP_ESTABLISHED - connected
84 * TCP_CLOSING - disconnecting
85 * TCP_LISTEN - listening
Andy Kingd021c342013-02-06 14:23:56 +000086 */
87
Jakub Kicinskib6459412021-12-28 16:49:13 -080088#include <linux/compat.h>
Andy Kingd021c342013-02-06 14:23:56 +000089#include <linux/types.h>
Andy Kingd021c342013-02-06 14:23:56 +000090#include <linux/bitops.h>
91#include <linux/cred.h>
92#include <linux/init.h>
93#include <linux/io.h>
94#include <linux/kernel.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010095#include <linux/sched/signal.h>
Andy Kingd021c342013-02-06 14:23:56 +000096#include <linux/kmod.h>
97#include <linux/list.h>
98#include <linux/miscdevice.h>
99#include <linux/module.h>
100#include <linux/mutex.h>
101#include <linux/net.h>
102#include <linux/poll.h>
Lepton Wu8236b082018-12-11 11:12:55 -0800103#include <linux/random.h>
Andy Kingd021c342013-02-06 14:23:56 +0000104#include <linux/skbuff.h>
105#include <linux/smp.h>
106#include <linux/socket.h>
107#include <linux/stddef.h>
108#include <linux/unistd.h>
109#include <linux/wait.h>
110#include <linux/workqueue.h>
111#include <net/sock.h>
Asias He82a54d02013-07-25 17:39:34 +0800112#include <net/af_vsock.h>
Andy Kingd021c342013-02-06 14:23:56 +0000113
114static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
115static void vsock_sk_destruct(struct sock *sk);
116static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
117
118/* Protocol family. */
119static struct proto vsock_proto = {
120 .name = "AF_VSOCK",
121 .owner = THIS_MODULE,
122 .obj_size = sizeof(struct vsock_sock),
123};
124
125/* The default peer timeout indicates how long we will wait for a peer response
126 * to a control message.
127 */
128#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
129
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +0100130#define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256)
131#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
132#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128
133
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100134/* Transport used for host->guest communication */
135static const struct vsock_transport *transport_h2g;
136/* Transport used for guest->host communication */
137static const struct vsock_transport *transport_g2h;
138/* Transport used for DGRAM communication */
139static const struct vsock_transport *transport_dgram;
Stefano Garzarella0e121902019-12-10 11:43:04 +0100140/* Transport used for local communication */
141static const struct vsock_transport *transport_local;
Andy Kingd021c342013-02-06 14:23:56 +0000142static DEFINE_MUTEX(vsock_register_mutex);
143
Andy Kingd021c342013-02-06 14:23:56 +0000144/**** UTILS ****/
145
146/* Each bound VSocket is stored in the bind hash table and each connected
147 * VSocket is stored in the connected hash table.
148 *
149 * Unbound sockets are all put on the same list attached to the end of the hash
150 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
151 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
152 * represents the list that addr hashes to).
153 *
154 * Specifically, we initialize the vsock_bind_table array to a size of
155 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
156 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
157 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
Asias Hea49dd9d2013-06-20 17:20:33 +0800158 * mods with VSOCK_HASH_SIZE to ensure this.
Andy Kingd021c342013-02-06 14:23:56 +0000159 */
Andy Kingd021c342013-02-06 14:23:56 +0000160#define MAX_PORT_RETRIES 24
161
Asias Hea49dd9d2013-06-20 17:20:33 +0800162#define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
Andy Kingd021c342013-02-06 14:23:56 +0000163#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
164#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
165
166/* XXX This can probably be implemented in a better way. */
167#define VSOCK_CONN_HASH(src, dst) \
Asias Hea49dd9d2013-06-20 17:20:33 +0800168 (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
Andy Kingd021c342013-02-06 14:23:56 +0000169#define vsock_connected_sockets(src, dst) \
170 (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
171#define vsock_connected_sockets_vsk(vsk) \
172 vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
173
Stefan Hajnoczi44f20982017-10-05 16:46:50 -0400174struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
175EXPORT_SYMBOL_GPL(vsock_bind_table);
176struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
177EXPORT_SYMBOL_GPL(vsock_connected_table);
178DEFINE_SPINLOCK(vsock_table_lock);
179EXPORT_SYMBOL_GPL(vsock_table_lock);
Andy Kingd021c342013-02-06 14:23:56 +0000180
Asias Heb3a6dfe2013-06-20 17:20:30 +0800181/* Autobind this socket to the local address if necessary. */
182static int vsock_auto_bind(struct vsock_sock *vsk)
183{
184 struct sock *sk = sk_vsock(vsk);
185 struct sockaddr_vm local_addr;
186
187 if (vsock_addr_bound(&vsk->local_addr))
188 return 0;
189 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
190 return __vsock_bind(sk, &local_addr);
191}
192
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100193static void vsock_init_tables(void)
Andy Kingd021c342013-02-06 14:23:56 +0000194{
195 int i;
196
197 for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
198 INIT_LIST_HEAD(&vsock_bind_table[i]);
199
200 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
201 INIT_LIST_HEAD(&vsock_connected_table[i]);
202}
203
204static void __vsock_insert_bound(struct list_head *list,
205 struct vsock_sock *vsk)
206{
207 sock_hold(&vsk->sk);
208 list_add(&vsk->bound_table, list);
209}
210
211static void __vsock_insert_connected(struct list_head *list,
212 struct vsock_sock *vsk)
213{
214 sock_hold(&vsk->sk);
215 list_add(&vsk->connected_table, list);
216}
217
218static void __vsock_remove_bound(struct vsock_sock *vsk)
219{
220 list_del_init(&vsk->bound_table);
221 sock_put(&vsk->sk);
222}
223
224static void __vsock_remove_connected(struct vsock_sock *vsk)
225{
226 list_del_init(&vsk->connected_table);
227 sock_put(&vsk->sk);
228}
229
230static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
231{
232 struct vsock_sock *vsk;
233
Stefano Garzarella36c5b482019-11-14 10:57:49 +0100234 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
235 if (vsock_addr_equals_addr(addr, &vsk->local_addr))
Andy Kingd021c342013-02-06 14:23:56 +0000236 return sk_vsock(vsk);
237
Stefano Garzarella36c5b482019-11-14 10:57:49 +0100238 if (addr->svm_port == vsk->local_addr.svm_port &&
239 (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
240 addr->svm_cid == VMADDR_CID_ANY))
241 return sk_vsock(vsk);
242 }
243
Andy Kingd021c342013-02-06 14:23:56 +0000244 return NULL;
245}
246
247static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
248 struct sockaddr_vm *dst)
249{
250 struct vsock_sock *vsk;
251
252 list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
253 connected_table) {
Reilly Grant990454b2013-04-01 11:41:52 -0700254 if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
255 dst->svm_port == vsk->local_addr.svm_port) {
Andy Kingd021c342013-02-06 14:23:56 +0000256 return sk_vsock(vsk);
257 }
258 }
259
260 return NULL;
261}
262
Andy Kingd021c342013-02-06 14:23:56 +0000263static void vsock_insert_unbound(struct vsock_sock *vsk)
264{
265 spin_lock_bh(&vsock_table_lock);
266 __vsock_insert_bound(vsock_unbound_sockets, vsk);
267 spin_unlock_bh(&vsock_table_lock);
268}
269
270void vsock_insert_connected(struct vsock_sock *vsk)
271{
272 struct list_head *list = vsock_connected_sockets(
273 &vsk->remote_addr, &vsk->local_addr);
274
275 spin_lock_bh(&vsock_table_lock);
276 __vsock_insert_connected(list, vsk);
277 spin_unlock_bh(&vsock_table_lock);
278}
279EXPORT_SYMBOL_GPL(vsock_insert_connected);
280
281void vsock_remove_bound(struct vsock_sock *vsk)
282{
283 spin_lock_bh(&vsock_table_lock);
Sunil Muthuswamyd5afa822019-06-13 03:52:27 +0000284 if (__vsock_in_bound_table(vsk))
285 __vsock_remove_bound(vsk);
Andy Kingd021c342013-02-06 14:23:56 +0000286 spin_unlock_bh(&vsock_table_lock);
287}
288EXPORT_SYMBOL_GPL(vsock_remove_bound);
289
290void vsock_remove_connected(struct vsock_sock *vsk)
291{
292 spin_lock_bh(&vsock_table_lock);
Sunil Muthuswamyd5afa822019-06-13 03:52:27 +0000293 if (__vsock_in_connected_table(vsk))
294 __vsock_remove_connected(vsk);
Andy Kingd021c342013-02-06 14:23:56 +0000295 spin_unlock_bh(&vsock_table_lock);
296}
297EXPORT_SYMBOL_GPL(vsock_remove_connected);
298
299struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
300{
301 struct sock *sk;
302
303 spin_lock_bh(&vsock_table_lock);
304 sk = __vsock_find_bound_socket(addr);
305 if (sk)
306 sock_hold(sk);
307
308 spin_unlock_bh(&vsock_table_lock);
309
310 return sk;
311}
312EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
313
314struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
315 struct sockaddr_vm *dst)
316{
317 struct sock *sk;
318
319 spin_lock_bh(&vsock_table_lock);
320 sk = __vsock_find_connected_socket(src, dst);
321 if (sk)
322 sock_hold(sk);
323
324 spin_unlock_bh(&vsock_table_lock);
325
326 return sk;
327}
328EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
329
Stefan Hajnoczi6773b7d2016-07-28 15:36:31 +0100330void vsock_remove_sock(struct vsock_sock *vsk)
331{
Sunil Muthuswamyd5afa822019-06-13 03:52:27 +0000332 vsock_remove_bound(vsk);
333 vsock_remove_connected(vsk);
Stefan Hajnoczi6773b7d2016-07-28 15:36:31 +0100334}
335EXPORT_SYMBOL_GPL(vsock_remove_sock);
336
Andy Kingd021c342013-02-06 14:23:56 +0000337void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
338{
339 int i;
340
341 spin_lock_bh(&vsock_table_lock);
342
343 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
344 struct vsock_sock *vsk;
345 list_for_each_entry(vsk, &vsock_connected_table[i],
Julia Lawalld9af2d62013-08-05 16:47:38 +0200346 connected_table)
Andy Kingd021c342013-02-06 14:23:56 +0000347 fn(sk_vsock(vsk));
348 }
349
350 spin_unlock_bh(&vsock_table_lock);
351}
352EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
353
354void vsock_add_pending(struct sock *listener, struct sock *pending)
355{
356 struct vsock_sock *vlistener;
357 struct vsock_sock *vpending;
358
359 vlistener = vsock_sk(listener);
360 vpending = vsock_sk(pending);
361
362 sock_hold(pending);
363 sock_hold(listener);
364 list_add_tail(&vpending->pending_links, &vlistener->pending_links);
365}
366EXPORT_SYMBOL_GPL(vsock_add_pending);
367
368void vsock_remove_pending(struct sock *listener, struct sock *pending)
369{
370 struct vsock_sock *vpending = vsock_sk(pending);
371
372 list_del_init(&vpending->pending_links);
373 sock_put(listener);
374 sock_put(pending);
375}
376EXPORT_SYMBOL_GPL(vsock_remove_pending);
377
378void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
379{
380 struct vsock_sock *vlistener;
381 struct vsock_sock *vconnected;
382
383 vlistener = vsock_sk(listener);
384 vconnected = vsock_sk(connected);
385
386 sock_hold(connected);
387 sock_hold(listener);
388 list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
389}
390EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
391
Stefano Garzarella408624af2019-12-10 11:43:06 +0100392static bool vsock_use_local_transport(unsigned int remote_cid)
393{
394 if (!transport_local)
395 return false;
396
397 if (remote_cid == VMADDR_CID_LOCAL)
398 return true;
399
400 if (transport_g2h) {
401 return remote_cid == transport_g2h->get_local_cid();
402 } else {
403 return remote_cid == VMADDR_CID_HOST;
404 }
405}
406
Stefano Garzarella6a2c09622019-11-14 10:57:48 +0100407static void vsock_deassign_transport(struct vsock_sock *vsk)
408{
409 if (!vsk->transport)
410 return;
411
412 vsk->transport->destruct(vsk);
413 module_put(vsk->transport->module);
414 vsk->transport = NULL;
415}
416
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100417/* Assign a transport to a socket and call the .init transport callback.
418 *
Arseny Krasnov8cb48552021-06-11 14:11:18 +0300419 * Note: for connection oriented socket this must be called when vsk->remote_addr
420 * is set (e.g. during the connect() or when a connection request on a listener
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100421 * socket is received).
422 * The vsk->remote_addr is used to decide which transport to use:
Stefano Garzarella408624af2019-12-10 11:43:06 +0100423 * - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
424 * g2h is not loaded, will use local transport;
Andra Paraschiv7f816982020-12-14 18:11:22 +0200425 * - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field
426 * includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100427 * - remote CID > VMADDR_CID_HOST will use host->guest transport;
428 */
429int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
430{
431 const struct vsock_transport *new_transport;
432 struct sock *sk = sk_vsock(vsk);
433 unsigned int remote_cid = vsk->remote_addr.svm_cid;
Andra Paraschiv7f816982020-12-14 18:11:22 +0200434 __u8 remote_flags;
Stefano Garzarella039fccc2019-11-21 10:06:09 +0100435 int ret;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100436
Andra Paraschiv1b5f2ab2020-12-14 18:11:21 +0200437 /* If the packet is coming with the source and destination CIDs higher
438 * than VMADDR_CID_HOST, then a vsock channel where all the packets are
439 * forwarded to the host should be established. Then the host will
440 * need to forward the packets to the guest.
441 *
442 * The flag is set on the (listen) receive path (psk is not NULL). On
443 * the connect path the flag can be set by the user space application.
444 */
445 if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
446 vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
447 vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;
448
Andra Paraschiv7f816982020-12-14 18:11:22 +0200449 remote_flags = vsk->remote_addr.svm_flags;
450
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100451 switch (sk->sk_type) {
452 case SOCK_DGRAM:
453 new_transport = transport_dgram;
454 break;
455 case SOCK_STREAM:
Arseny Krasnov0798e782021-06-11 14:11:04 +0300456 case SOCK_SEQPACKET:
Stefano Garzarella408624af2019-12-10 11:43:06 +0100457 if (vsock_use_local_transport(remote_cid))
458 new_transport = transport_local;
Andra Paraschiv7f816982020-12-14 18:11:22 +0200459 else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
460 (remote_flags & VMADDR_FLAG_TO_HOST))
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100461 new_transport = transport_g2h;
462 else
463 new_transport = transport_h2g;
464 break;
465 default:
466 return -ESOCKTNOSUPPORT;
467 }
468
469 if (vsk->transport) {
470 if (vsk->transport == new_transport)
471 return 0;
472
Stefano Garzarella3f749572020-02-26 11:58:18 +0100473 /* transport->release() must be called with sock lock acquired.
Arseny Krasnov8cb48552021-06-11 14:11:18 +0300474 * This path can only be taken during vsock_connect(), where we
475 * have already held the sock lock. In the other cases, this
476 * function is called on a new socket which is not assigned to
477 * any transport.
Stefano Garzarella3f749572020-02-26 11:58:18 +0100478 */
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100479 vsk->transport->release(vsk);
Stefano Garzarella6a2c09622019-11-14 10:57:48 +0100480 vsock_deassign_transport(vsk);
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100481 }
482
Stefano Garzarella6a2c09622019-11-14 10:57:48 +0100483 /* We increase the module refcnt to prevent the transport unloading
484 * while there are open sockets assigned to it.
485 */
486 if (!new_transport || !try_module_get(new_transport->module))
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100487 return -ENODEV;
488
Arseny Krasnov0798e782021-06-11 14:11:04 +0300489 if (sk->sk_type == SOCK_SEQPACKET) {
490 if (!new_transport->seqpacket_allow ||
491 !new_transport->seqpacket_allow(remote_cid)) {
492 module_put(new_transport->module);
493 return -ESOCKTNOSUPPORT;
494 }
495 }
496
Stefano Garzarella039fccc2019-11-21 10:06:09 +0100497 ret = new_transport->init(vsk, psk);
498 if (ret) {
499 module_put(new_transport->module);
500 return ret;
501 }
502
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100503 vsk->transport = new_transport;
504
Stefano Garzarella039fccc2019-11-21 10:06:09 +0100505 return 0;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100506}
507EXPORT_SYMBOL_GPL(vsock_assign_transport);
508
509bool vsock_find_cid(unsigned int cid)
510{
511 if (transport_g2h && cid == transport_g2h->get_local_cid())
512 return true;
513
514 if (transport_h2g && cid == VMADDR_CID_HOST)
515 return true;
516
Stefano Garzarella408624af2019-12-10 11:43:06 +0100517 if (transport_local && cid == VMADDR_CID_LOCAL)
518 return true;
519
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100520 return false;
521}
522EXPORT_SYMBOL_GPL(vsock_find_cid);
523
Andy Kingd021c342013-02-06 14:23:56 +0000524static struct sock *vsock_dequeue_accept(struct sock *listener)
525{
526 struct vsock_sock *vlistener;
527 struct vsock_sock *vconnected;
528
529 vlistener = vsock_sk(listener);
530
531 if (list_empty(&vlistener->accept_queue))
532 return NULL;
533
534 vconnected = list_entry(vlistener->accept_queue.next,
535 struct vsock_sock, accept_queue);
536
537 list_del_init(&vconnected->accept_queue);
538 sock_put(listener);
539 /* The caller will need a reference on the connected socket so we let
540 * it call sock_put().
541 */
542
543 return sk_vsock(vconnected);
544}
545
546static bool vsock_is_accept_queue_empty(struct sock *sk)
547{
548 struct vsock_sock *vsk = vsock_sk(sk);
549 return list_empty(&vsk->accept_queue);
550}
551
552static bool vsock_is_pending(struct sock *sk)
553{
554 struct vsock_sock *vsk = vsock_sk(sk);
555 return !list_empty(&vsk->pending_links);
556}
557
558static int vsock_send_shutdown(struct sock *sk, int mode)
559{
Stefano Garzarellafe502c42019-11-14 10:57:39 +0100560 struct vsock_sock *vsk = vsock_sk(sk);
561
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100562 if (!vsk->transport)
563 return -ENODEV;
564
Stefano Garzarellafe502c42019-11-14 10:57:39 +0100565 return vsk->transport->shutdown(vsk, mode);
Andy Kingd021c342013-02-06 14:23:56 +0000566}
567
Cong Wang455f05e2018-08-06 11:06:02 -0700568static void vsock_pending_work(struct work_struct *work)
Andy Kingd021c342013-02-06 14:23:56 +0000569{
570 struct sock *sk;
571 struct sock *listener;
572 struct vsock_sock *vsk;
573 bool cleanup;
574
Cong Wang455f05e2018-08-06 11:06:02 -0700575 vsk = container_of(work, struct vsock_sock, pending_work.work);
Andy Kingd021c342013-02-06 14:23:56 +0000576 sk = sk_vsock(vsk);
577 listener = vsk->listener;
578 cleanup = true;
579
580 lock_sock(listener);
Stefan Hajnoczi4192f672016-06-23 16:28:58 +0100581 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
Andy Kingd021c342013-02-06 14:23:56 +0000582
583 if (vsock_is_pending(sk)) {
584 vsock_remove_pending(listener, sk);
Jorgen Hansen1190cfd2016-09-26 23:59:53 -0700585
Eric Dumazet7976a112019-11-05 14:11:52 -0800586 sk_acceptq_removed(listener);
Andy Kingd021c342013-02-06 14:23:56 +0000587 } else if (!vsk->rejected) {
588 /* We are not on the pending list and accept() did not reject
589 * us, so we must have been accepted by our user process. We
590 * just need to drop our references to the sockets and be on
591 * our way.
592 */
593 cleanup = false;
594 goto out;
595 }
596
Andy Kingd021c342013-02-06 14:23:56 +0000597 /* We need to remove ourself from the global connected sockets list so
598 * incoming packets can't find this socket, and to reduce the reference
599 * count.
600 */
Sunil Muthuswamyd5afa822019-06-13 03:52:27 +0000601 vsock_remove_connected(vsk);
Andy Kingd021c342013-02-06 14:23:56 +0000602
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -0400603 sk->sk_state = TCP_CLOSE;
Andy Kingd021c342013-02-06 14:23:56 +0000604
605out:
606 release_sock(sk);
607 release_sock(listener);
608 if (cleanup)
609 sock_put(sk);
610
611 sock_put(sk);
612 sock_put(listener);
613}
Andy Kingd021c342013-02-06 14:23:56 +0000614
615/**** SOCKET OPERATIONS ****/
616
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300617static int __vsock_bind_connectible(struct vsock_sock *vsk,
618 struct sockaddr_vm *addr)
Andy Kingd021c342013-02-06 14:23:56 +0000619{
Lepton Wua22d3252019-01-09 15:45:41 -0800620 static u32 port;
Andy Kingd021c342013-02-06 14:23:56 +0000621 struct sockaddr_vm new_addr;
622
Lepton Wu8236b082018-12-11 11:12:55 -0800623 if (!port)
624 port = LAST_RESERVED_PORT + 1 +
625 prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
626
Andy Kingd021c342013-02-06 14:23:56 +0000627 vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
628
629 if (addr->svm_port == VMADDR_PORT_ANY) {
630 bool found = false;
631 unsigned int i;
632
633 for (i = 0; i < MAX_PORT_RETRIES; i++) {
634 if (port <= LAST_RESERVED_PORT)
635 port = LAST_RESERVED_PORT + 1;
636
637 new_addr.svm_port = port++;
638
639 if (!__vsock_find_bound_socket(&new_addr)) {
640 found = true;
641 break;
642 }
643 }
644
645 if (!found)
646 return -EADDRNOTAVAIL;
647 } else {
648 /* If port is in reserved range, ensure caller
649 * has necessary privileges.
650 */
651 if (addr->svm_port <= LAST_RESERVED_PORT &&
652 !capable(CAP_NET_BIND_SERVICE)) {
653 return -EACCES;
654 }
655
656 if (__vsock_find_bound_socket(&new_addr))
657 return -EADDRINUSE;
658 }
659
660 vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
661
Arseny Krasnov8cb48552021-06-11 14:11:18 +0300662 /* Remove connection oriented sockets from the unbound list and add them
663 * to the hash table for easy lookup by its address. The unbound list
664 * is simply an extra entry at the end of the hash table, a trick used
665 * by AF_UNIX.
Andy Kingd021c342013-02-06 14:23:56 +0000666 */
667 __vsock_remove_bound(vsk);
668 __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
669
670 return 0;
671}
672
673static int __vsock_bind_dgram(struct vsock_sock *vsk,
674 struct sockaddr_vm *addr)
675{
Stefano Garzarellafe502c42019-11-14 10:57:39 +0100676 return vsk->transport->dgram_bind(vsk, addr);
Andy Kingd021c342013-02-06 14:23:56 +0000677}
678
679static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
680{
681 struct vsock_sock *vsk = vsock_sk(sk);
Andy Kingd021c342013-02-06 14:23:56 +0000682 int retval;
683
684 /* First ensure this socket isn't already bound. */
685 if (vsock_addr_bound(&vsk->local_addr))
686 return -EINVAL;
687
688 /* Now bind to the provided address or select appropriate values if
689 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
690 * like AF_INET prevents binding to a non-local IP address (in most
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100691 * cases), we only allow binding to a local CID.
Andy Kingd021c342013-02-06 14:23:56 +0000692 */
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +0100693 if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
Andy Kingd021c342013-02-06 14:23:56 +0000694 return -EADDRNOTAVAIL;
695
696 switch (sk->sk_socket->type) {
697 case SOCK_STREAM:
Arseny Krasnov0798e782021-06-11 14:11:04 +0300698 case SOCK_SEQPACKET:
Andy Kingd021c342013-02-06 14:23:56 +0000699 spin_lock_bh(&vsock_table_lock);
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300700 retval = __vsock_bind_connectible(vsk, addr);
Andy Kingd021c342013-02-06 14:23:56 +0000701 spin_unlock_bh(&vsock_table_lock);
702 break;
703
704 case SOCK_DGRAM:
705 retval = __vsock_bind_dgram(vsk, addr);
706 break;
707
708 default:
709 retval = -EINVAL;
710 break;
711 }
712
713 return retval;
714}
715
Cong Wang455f05e2018-08-06 11:06:02 -0700716static void vsock_connect_timeout(struct work_struct *work);
717
Stefano Garzarellab9ca2f52019-11-14 10:57:43 +0100718static struct sock *__vsock_create(struct net *net,
719 struct socket *sock,
720 struct sock *parent,
721 gfp_t priority,
722 unsigned short type,
723 int kern)
Andy Kingd021c342013-02-06 14:23:56 +0000724{
725 struct sock *sk;
726 struct vsock_sock *psk;
727 struct vsock_sock *vsk;
728
Eric W. Biederman11aa9c22015-05-08 21:09:13 -0500729 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
Andy Kingd021c342013-02-06 14:23:56 +0000730 if (!sk)
731 return NULL;
732
733 sock_init_data(sock, sk);
734
735 /* sk->sk_type is normally set in sock_init_data, but only if sock is
736 * non-NULL. We make sure that our sockets always have a type by
737 * setting it here if needed.
738 */
739 if (!sock)
740 sk->sk_type = type;
741
742 vsk = vsock_sk(sk);
743 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
744 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
745
746 sk->sk_destruct = vsock_sk_destruct;
747 sk->sk_backlog_rcv = vsock_queue_rcv_skb;
Andy Kingd021c342013-02-06 14:23:56 +0000748 sock_reset_flag(sk, SOCK_DONE);
749
750 INIT_LIST_HEAD(&vsk->bound_table);
751 INIT_LIST_HEAD(&vsk->connected_table);
752 vsk->listener = NULL;
753 INIT_LIST_HEAD(&vsk->pending_links);
754 INIT_LIST_HEAD(&vsk->accept_queue);
755 vsk->rejected = false;
756 vsk->sent_request = false;
757 vsk->ignore_connecting_rst = false;
758 vsk->peer_shutdown = 0;
Cong Wang455f05e2018-08-06 11:06:02 -0700759 INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
760 INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
Andy Kingd021c342013-02-06 14:23:56 +0000761
762 psk = parent ? vsock_sk(parent) : NULL;
763 if (parent) {
764 vsk->trusted = psk->trusted;
765 vsk->owner = get_cred(psk->owner);
766 vsk->connect_timeout = psk->connect_timeout;
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +0100767 vsk->buffer_size = psk->buffer_size;
768 vsk->buffer_min_size = psk->buffer_min_size;
769 vsk->buffer_max_size = psk->buffer_max_size;
David Brazdil1f935e82021-03-19 13:05:41 +0000770 security_sk_clone(parent, sk);
Andy Kingd021c342013-02-06 14:23:56 +0000771 } else {
Jeff Vander Stoepaf545bb2020-10-23 16:37:57 +0200772 vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
Andy Kingd021c342013-02-06 14:23:56 +0000773 vsk->owner = get_current_cred();
774 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +0100775 vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
776 vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
777 vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
Andy Kingd021c342013-02-06 14:23:56 +0000778 }
779
Andy Kingd021c342013-02-06 14:23:56 +0000780 return sk;
781}
Andy Kingd021c342013-02-06 14:23:56 +0000782
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300783static bool sock_type_connectible(u16 type)
784{
Arseny Krasnov0798e782021-06-11 14:11:04 +0300785 return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300786}
787
Dexuan Cui0d9138f2019-09-30 18:43:50 +0000788static void __vsock_release(struct sock *sk, int level)
Andy Kingd021c342013-02-06 14:23:56 +0000789{
790 if (sk) {
Andy Kingd021c342013-02-06 14:23:56 +0000791 struct sock *pending;
792 struct vsock_sock *vsk;
793
794 vsk = vsock_sk(sk);
795 pending = NULL; /* Compiler warning. */
796
Dexuan Cui0d9138f2019-09-30 18:43:50 +0000797 /* When "level" is SINGLE_DEPTH_NESTING, use the nested
798 * version to avoid the warning "possible recursive locking
799 * detected". When "level" is 0, lock_sock_nested(sk, level)
800 * is the same as lock_sock(sk).
801 */
802 lock_sock_nested(sk, level);
Stefano Garzarella3f749572020-02-26 11:58:18 +0100803
804 if (vsk->transport)
805 vsk->transport->release(vsk);
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300806 else if (sock_type_connectible(sk->sk_type))
Stefano Garzarella3f749572020-02-26 11:58:18 +0100807 vsock_remove_sock(vsk);
808
Andy Kingd021c342013-02-06 14:23:56 +0000809 sock_orphan(sk);
810 sk->sk_shutdown = SHUTDOWN_MASK;
811
Christophe JAILLET3b7ad082019-11-03 07:11:11 +0100812 skb_queue_purge(&sk->sk_receive_queue);
Andy Kingd021c342013-02-06 14:23:56 +0000813
814 /* Clean up any sockets that never were accepted. */
815 while ((pending = vsock_dequeue_accept(sk)) != NULL) {
Dexuan Cui0d9138f2019-09-30 18:43:50 +0000816 __vsock_release(pending, SINGLE_DEPTH_NESTING);
Andy Kingd021c342013-02-06 14:23:56 +0000817 sock_put(pending);
818 }
819
820 release_sock(sk);
821 sock_put(sk);
822 }
823}
824
825static void vsock_sk_destruct(struct sock *sk)
826{
827 struct vsock_sock *vsk = vsock_sk(sk);
828
Stefano Garzarella6a2c09622019-11-14 10:57:48 +0100829 vsock_deassign_transport(vsk);
Andy Kingd021c342013-02-06 14:23:56 +0000830
831 /* When clearing these addresses, there's no need to set the family and
832 * possibly register the address family with the kernel.
833 */
834 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
835 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
836
837 put_cred(vsk->owner);
838}
839
840static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
841{
842 int err;
843
844 err = sock_queue_rcv_skb(sk, skb);
845 if (err)
846 kfree_skb(skb);
847
848 return err;
849}
850
Stefano Garzarellab9ca2f52019-11-14 10:57:43 +0100851struct sock *vsock_create_connected(struct sock *parent)
852{
853 return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
854 parent->sk_type, 0);
855}
856EXPORT_SYMBOL_GPL(vsock_create_connected);
857
Andy Kingd021c342013-02-06 14:23:56 +0000858s64 vsock_stream_has_data(struct vsock_sock *vsk)
859{
Stefano Garzarellafe502c42019-11-14 10:57:39 +0100860 return vsk->transport->stream_has_data(vsk);
Andy Kingd021c342013-02-06 14:23:56 +0000861}
862EXPORT_SYMBOL_GPL(vsock_stream_has_data);
863
Stefano Garzarellacc971412021-06-18 15:35:24 +0200864static s64 vsock_connectible_has_data(struct vsock_sock *vsk)
Arseny Krasnov0798e782021-06-11 14:11:04 +0300865{
866 struct sock *sk = sk_vsock(vsk);
867
868 if (sk->sk_type == SOCK_SEQPACKET)
869 return vsk->transport->seqpacket_has_data(vsk);
870 else
871 return vsock_stream_has_data(vsk);
872}
873
Andy Kingd021c342013-02-06 14:23:56 +0000874s64 vsock_stream_has_space(struct vsock_sock *vsk)
875{
Stefano Garzarellafe502c42019-11-14 10:57:39 +0100876 return vsk->transport->stream_has_space(vsk);
Andy Kingd021c342013-02-06 14:23:56 +0000877}
878EXPORT_SYMBOL_GPL(vsock_stream_has_space);
879
880static int vsock_release(struct socket *sock)
881{
Dexuan Cui0d9138f2019-09-30 18:43:50 +0000882 __vsock_release(sock->sk, 0);
Andy Kingd021c342013-02-06 14:23:56 +0000883 sock->sk = NULL;
884 sock->state = SS_FREE;
885
886 return 0;
887}
888
889static int
890vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
891{
892 int err;
893 struct sock *sk;
894 struct sockaddr_vm *vm_addr;
895
896 sk = sock->sk;
897
898 if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
899 return -EINVAL;
900
901 lock_sock(sk);
902 err = __vsock_bind(sk, vm_addr);
903 release_sock(sk);
904
905 return err;
906}
907
908static int vsock_getname(struct socket *sock,
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +0100909 struct sockaddr *addr, int peer)
Andy Kingd021c342013-02-06 14:23:56 +0000910{
911 int err;
912 struct sock *sk;
913 struct vsock_sock *vsk;
914 struct sockaddr_vm *vm_addr;
915
916 sk = sock->sk;
917 vsk = vsock_sk(sk);
918 err = 0;
919
920 lock_sock(sk);
921
922 if (peer) {
923 if (sock->state != SS_CONNECTED) {
924 err = -ENOTCONN;
925 goto out;
926 }
927 vm_addr = &vsk->remote_addr;
928 } else {
929 vm_addr = &vsk->local_addr;
930 }
931
932 if (!vm_addr) {
933 err = -EINVAL;
934 goto out;
935 }
936
937 /* sys_getsockname() and sys_getpeername() pass us a
938 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
939 * that macro is defined in socket.c instead of .h, so we hardcode its
940 * value here.
941 */
942 BUILD_BUG_ON(sizeof(*vm_addr) > 128);
943 memcpy(addr, vm_addr, sizeof(*vm_addr));
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +0100944 err = sizeof(*vm_addr);
Andy Kingd021c342013-02-06 14:23:56 +0000945
946out:
947 release_sock(sk);
948 return err;
949}
950
951static int vsock_shutdown(struct socket *sock, int mode)
952{
953 int err;
954 struct sock *sk;
955
956 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
957 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
958 * here like the other address families do. Note also that the
959 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
960 * which is what we want.
961 */
962 mode++;
963
964 if ((mode & ~SHUTDOWN_MASK) || !mode)
965 return -EINVAL;
966
Arseny Krasnov8cb48552021-06-11 14:11:18 +0300967 /* If this is a connection oriented socket and it is not connected then
968 * bail out immediately. If it is a DGRAM socket then we must first
969 * kick the socket so that it wakes up from any sleeping calls, for
970 * example recv(), and then afterwards return the error.
Andy Kingd021c342013-02-06 14:23:56 +0000971 */
972
973 sk = sock->sk;
Stefano Garzarella1c5fae92021-02-09 09:52:19 +0100974
975 lock_sock(sk);
Andy Kingd021c342013-02-06 14:23:56 +0000976 if (sock->state == SS_UNCONNECTED) {
977 err = -ENOTCONN;
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300978 if (sock_type_connectible(sk->sk_type))
Stefano Garzarella1c5fae92021-02-09 09:52:19 +0100979 goto out;
Andy Kingd021c342013-02-06 14:23:56 +0000980 } else {
981 sock->state = SS_DISCONNECTING;
982 err = 0;
983 }
984
985 /* Receive and send shutdowns are treated alike. */
986 mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
987 if (mode) {
Andy Kingd021c342013-02-06 14:23:56 +0000988 sk->sk_shutdown |= mode;
989 sk->sk_state_change(sk);
Andy Kingd021c342013-02-06 14:23:56 +0000990
Arseny Krasnova9e29e52021-06-11 14:09:47 +0300991 if (sock_type_connectible(sk->sk_type)) {
Andy Kingd021c342013-02-06 14:23:56 +0000992 sock_reset_flag(sk, SOCK_DONE);
993 vsock_send_shutdown(sk, mode);
994 }
995 }
996
Stefano Garzarella1c5fae92021-02-09 09:52:19 +0100997out:
998 release_sock(sk);
Andy Kingd021c342013-02-06 14:23:56 +0000999 return err;
1000}
1001
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001002static __poll_t vsock_poll(struct file *file, struct socket *sock,
1003 poll_table *wait)
Andy Kingd021c342013-02-06 14:23:56 +00001004{
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001005 struct sock *sk;
1006 __poll_t mask;
1007 struct vsock_sock *vsk;
1008
1009 sk = sock->sk;
1010 vsk = vsock_sk(sk);
1011
1012 poll_wait(file, sk_sleep(sk), wait);
1013 mask = 0;
Andy Kingd021c342013-02-06 14:23:56 +00001014
1015 if (sk->sk_err)
1016 /* Signify that there has been an error on this socket. */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001017 mask |= EPOLLERR;
Andy Kingd021c342013-02-06 14:23:56 +00001018
1019 /* INET sockets treat local write shutdown and peer write shutdown as a
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001020 * case of EPOLLHUP set.
Andy Kingd021c342013-02-06 14:23:56 +00001021 */
1022 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
1023 ((sk->sk_shutdown & SEND_SHUTDOWN) &&
1024 (vsk->peer_shutdown & SEND_SHUTDOWN))) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001025 mask |= EPOLLHUP;
Andy Kingd021c342013-02-06 14:23:56 +00001026 }
1027
1028 if (sk->sk_shutdown & RCV_SHUTDOWN ||
1029 vsk->peer_shutdown & SEND_SHUTDOWN) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001030 mask |= EPOLLRDHUP;
Andy Kingd021c342013-02-06 14:23:56 +00001031 }
1032
1033 if (sock->type == SOCK_DGRAM) {
1034 /* For datagram sockets we can read if there is something in
1035 * the queue and write as long as the socket isn't shutdown for
1036 * sending.
1037 */
Eric Dumazet3ef7cf52019-10-23 22:44:50 -07001038 if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
Andy Kingd021c342013-02-06 14:23:56 +00001039 (sk->sk_shutdown & RCV_SHUTDOWN)) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001040 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001041 }
1042
1043 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001044 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
Andy Kingd021c342013-02-06 14:23:56 +00001045
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001046 } else if (sock_type_connectible(sk->sk_type)) {
Alexander Popovc518ada2021-02-01 11:47:19 +03001047 const struct vsock_transport *transport;
1048
Andy Kingd021c342013-02-06 14:23:56 +00001049 lock_sock(sk);
1050
Alexander Popovc518ada2021-02-01 11:47:19 +03001051 transport = vsk->transport;
1052
Andy Kingd021c342013-02-06 14:23:56 +00001053 /* Listening sockets that have connections in their accept
1054 * queue can be read.
1055 */
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001056 if (sk->sk_state == TCP_LISTEN
Andy Kingd021c342013-02-06 14:23:56 +00001057 && !vsock_is_accept_queue_empty(sk))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001058 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001059
1060 /* If there is something in the queue then we can read. */
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001061 if (transport && transport->stream_is_active(vsk) &&
Andy Kingd021c342013-02-06 14:23:56 +00001062 !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1063 bool data_ready_now = false;
1064 int ret = transport->notify_poll_in(
1065 vsk, 1, &data_ready_now);
1066 if (ret < 0) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001067 mask |= EPOLLERR;
Andy Kingd021c342013-02-06 14:23:56 +00001068 } else {
1069 if (data_ready_now)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001070 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001071
1072 }
1073 }
1074
1075 /* Sockets whose connections have been closed, reset, or
1076 * terminated should also be considered read, and we check the
1077 * shutdown flag for that.
1078 */
1079 if (sk->sk_shutdown & RCV_SHUTDOWN ||
1080 vsk->peer_shutdown & SEND_SHUTDOWN) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001081 mask |= EPOLLIN | EPOLLRDNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001082 }
1083
1084 /* Connected sockets that can produce data can be written. */
Stefano Garzarella1980c052020-08-12 14:56:02 +02001085 if (transport && sk->sk_state == TCP_ESTABLISHED) {
Andy Kingd021c342013-02-06 14:23:56 +00001086 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1087 bool space_avail_now = false;
1088 int ret = transport->notify_poll_out(
1089 vsk, 1, &space_avail_now);
1090 if (ret < 0) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001091 mask |= EPOLLERR;
Andy Kingd021c342013-02-06 14:23:56 +00001092 } else {
1093 if (space_avail_now)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001094 /* Remove EPOLLWRBAND since INET
Andy Kingd021c342013-02-06 14:23:56 +00001095 * sockets are not setting it.
1096 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001097 mask |= EPOLLOUT | EPOLLWRNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001098
1099 }
1100 }
1101 }
1102
1103 /* Simulate INET socket poll behaviors, which sets
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001104 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
Andy Kingd021c342013-02-06 14:23:56 +00001105 * but local send is not shutdown.
1106 */
Stefan Hajnocziba3169f2018-01-26 11:48:25 +00001107 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
Andy Kingd021c342013-02-06 14:23:56 +00001108 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001109 mask |= EPOLLOUT | EPOLLWRNORM;
Andy Kingd021c342013-02-06 14:23:56 +00001110
1111 }
1112
1113 release_sock(sk);
1114 }
1115
1116 return mask;
1117}
1118
Ying Xue1b784142015-03-02 15:37:48 +08001119static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1120 size_t len)
Andy Kingd021c342013-02-06 14:23:56 +00001121{
1122 int err;
1123 struct sock *sk;
1124 struct vsock_sock *vsk;
1125 struct sockaddr_vm *remote_addr;
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001126 const struct vsock_transport *transport;
Andy Kingd021c342013-02-06 14:23:56 +00001127
1128 if (msg->msg_flags & MSG_OOB)
1129 return -EOPNOTSUPP;
1130
1131 /* For now, MSG_DONTWAIT is always assumed... */
1132 err = 0;
1133 sk = sock->sk;
1134 vsk = vsock_sk(sk);
1135
1136 lock_sock(sk);
1137
Alexander Popovc518ada2021-02-01 11:47:19 +03001138 transport = vsk->transport;
1139
Asias Heb3a6dfe2013-06-20 17:20:30 +08001140 err = vsock_auto_bind(vsk);
1141 if (err)
1142 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001143
Andy Kingd021c342013-02-06 14:23:56 +00001144
1145 /* If the provided message contains an address, use that. Otherwise
1146 * fall back on the socket's remote handle (if it has been connected).
1147 */
1148 if (msg->msg_name &&
1149 vsock_addr_cast(msg->msg_name, msg->msg_namelen,
1150 &remote_addr) == 0) {
1151 /* Ensure this address is of the right type and is a valid
1152 * destination.
1153 */
1154
1155 if (remote_addr->svm_cid == VMADDR_CID_ANY)
1156 remote_addr->svm_cid = transport->get_local_cid();
1157
1158 if (!vsock_addr_bound(remote_addr)) {
1159 err = -EINVAL;
1160 goto out;
1161 }
1162 } else if (sock->state == SS_CONNECTED) {
1163 remote_addr = &vsk->remote_addr;
1164
1165 if (remote_addr->svm_cid == VMADDR_CID_ANY)
1166 remote_addr->svm_cid = transport->get_local_cid();
1167
1168 /* XXX Should connect() or this function ensure remote_addr is
1169 * bound?
1170 */
1171 if (!vsock_addr_bound(&vsk->remote_addr)) {
1172 err = -EINVAL;
1173 goto out;
1174 }
1175 } else {
1176 err = -EINVAL;
1177 goto out;
1178 }
1179
1180 if (!transport->dgram_allow(remote_addr->svm_cid,
1181 remote_addr->svm_port)) {
1182 err = -EINVAL;
1183 goto out;
1184 }
1185
Al Viro0f7db232014-11-20 04:05:34 -05001186 err = transport->dgram_enqueue(vsk, remote_addr, msg, len);
Andy Kingd021c342013-02-06 14:23:56 +00001187
1188out:
1189 release_sock(sk);
1190 return err;
1191}
1192
1193static int vsock_dgram_connect(struct socket *sock,
1194 struct sockaddr *addr, int addr_len, int flags)
1195{
1196 int err;
1197 struct sock *sk;
1198 struct vsock_sock *vsk;
1199 struct sockaddr_vm *remote_addr;
1200
1201 sk = sock->sk;
1202 vsk = vsock_sk(sk);
1203
1204 err = vsock_addr_cast(addr, addr_len, &remote_addr);
1205 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
1206 lock_sock(sk);
1207 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
1208 VMADDR_PORT_ANY);
1209 sock->state = SS_UNCONNECTED;
1210 release_sock(sk);
1211 return 0;
1212 } else if (err != 0)
1213 return -EINVAL;
1214
1215 lock_sock(sk);
1216
Asias Heb3a6dfe2013-06-20 17:20:30 +08001217 err = vsock_auto_bind(vsk);
1218 if (err)
1219 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001220
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001221 if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
1222 remote_addr->svm_port)) {
Andy Kingd021c342013-02-06 14:23:56 +00001223 err = -EINVAL;
1224 goto out;
1225 }
1226
1227 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
1228 sock->state = SS_CONNECTED;
1229
1230out:
1231 release_sock(sk);
1232 return err;
1233}
1234
Ying Xue1b784142015-03-02 15:37:48 +08001235static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1236 size_t len, int flags)
Andy Kingd021c342013-02-06 14:23:56 +00001237{
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001238 struct vsock_sock *vsk = vsock_sk(sock->sk);
1239
1240 return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
Andy Kingd021c342013-02-06 14:23:56 +00001241}
1242
1243static const struct proto_ops vsock_dgram_ops = {
1244 .family = PF_VSOCK,
1245 .owner = THIS_MODULE,
1246 .release = vsock_release,
1247 .bind = vsock_bind,
1248 .connect = vsock_dgram_connect,
1249 .socketpair = sock_no_socketpair,
1250 .accept = sock_no_accept,
1251 .getname = vsock_getname,
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001252 .poll = vsock_poll,
Andy Kingd021c342013-02-06 14:23:56 +00001253 .ioctl = sock_no_ioctl,
1254 .listen = sock_no_listen,
1255 .shutdown = vsock_shutdown,
Andy Kingd021c342013-02-06 14:23:56 +00001256 .sendmsg = vsock_dgram_sendmsg,
1257 .recvmsg = vsock_dgram_recvmsg,
1258 .mmap = sock_no_mmap,
1259 .sendpage = sock_no_sendpage,
1260};
1261
Peng Tao380feae2017-03-15 09:32:17 +08001262static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1263{
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001264 const struct vsock_transport *transport = vsk->transport;
1265
Norbert Slusarek5d1cbcc2021-02-05 13:12:06 +01001266 if (!transport || !transport->cancel_pkt)
Peng Tao380feae2017-03-15 09:32:17 +08001267 return -EOPNOTSUPP;
1268
1269 return transport->cancel_pkt(vsk);
1270}
1271
Andy Kingd021c342013-02-06 14:23:56 +00001272static void vsock_connect_timeout(struct work_struct *work)
1273{
1274 struct sock *sk;
1275 struct vsock_sock *vsk;
1276
Cong Wang455f05e2018-08-06 11:06:02 -07001277 vsk = container_of(work, struct vsock_sock, connect_work.work);
Andy Kingd021c342013-02-06 14:23:56 +00001278 sk = sk_vsock(vsk);
1279
1280 lock_sock(sk);
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001281 if (sk->sk_state == TCP_SYN_SENT &&
Andy Kingd021c342013-02-06 14:23:56 +00001282 (sk->sk_shutdown != SHUTDOWN_MASK)) {
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001283 sk->sk_state = TCP_CLOSE;
Andy Kingd021c342013-02-06 14:23:56 +00001284 sk->sk_err = ETIMEDOUT;
Alexander Aringe3ae2362021-06-27 18:48:21 -04001285 sk_error_report(sk);
Norbert Slusarek3d0bc442021-02-05 13:14:05 +01001286 vsock_transport_cancel_pkt(vsk);
Andy Kingd021c342013-02-06 14:23:56 +00001287 }
1288 release_sock(sk);
1289
1290 sock_put(sk);
1291}
1292
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001293static int vsock_connect(struct socket *sock, struct sockaddr *addr,
1294 int addr_len, int flags)
Andy Kingd021c342013-02-06 14:23:56 +00001295{
1296 int err;
1297 struct sock *sk;
1298 struct vsock_sock *vsk;
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001299 const struct vsock_transport *transport;
Andy Kingd021c342013-02-06 14:23:56 +00001300 struct sockaddr_vm *remote_addr;
1301 long timeout;
1302 DEFINE_WAIT(wait);
1303
1304 err = 0;
1305 sk = sock->sk;
1306 vsk = vsock_sk(sk);
1307
1308 lock_sock(sk);
1309
1310 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1311 switch (sock->state) {
1312 case SS_CONNECTED:
1313 err = -EISCONN;
1314 goto out;
1315 case SS_DISCONNECTING:
1316 err = -EINVAL;
1317 goto out;
1318 case SS_CONNECTING:
1319 /* This continues on so we can move sock into the SS_CONNECTED
1320 * state once the connection has completed (at which point err
1321 * will be set to zero also). Otherwise, we will either wait
1322 * for the connection or return -EALREADY should this be a
1323 * non-blocking call.
1324 */
1325 err = -EALREADY;
Eiichi Tsukatac7cd82b2021-11-09 00:15:02 +00001326 if (flags & O_NONBLOCK)
1327 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001328 break;
1329 default:
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001330 if ((sk->sk_state == TCP_LISTEN) ||
Andy Kingd021c342013-02-06 14:23:56 +00001331 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1332 err = -EINVAL;
1333 goto out;
1334 }
1335
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001336 /* Set the remote address that we are connecting to. */
1337 memcpy(&vsk->remote_addr, remote_addr,
1338 sizeof(vsk->remote_addr));
1339
1340 err = vsock_assign_transport(vsk, NULL);
1341 if (err)
1342 goto out;
1343
1344 transport = vsk->transport;
1345
Andy Kingd021c342013-02-06 14:23:56 +00001346 /* The hypervisor and well-known contexts do not have socket
1347 * endpoints.
1348 */
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001349 if (!transport ||
1350 !transport->stream_allow(remote_addr->svm_cid,
Andy Kingd021c342013-02-06 14:23:56 +00001351 remote_addr->svm_port)) {
1352 err = -ENETUNREACH;
1353 goto out;
1354 }
1355
Asias Heb3a6dfe2013-06-20 17:20:30 +08001356 err = vsock_auto_bind(vsk);
1357 if (err)
1358 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001359
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001360 sk->sk_state = TCP_SYN_SENT;
Andy Kingd021c342013-02-06 14:23:56 +00001361
1362 err = transport->connect(vsk);
1363 if (err < 0)
1364 goto out;
1365
1366 /* Mark sock as connecting and set the error code to in
1367 * progress in case this is a non-blocking connect.
1368 */
1369 sock->state = SS_CONNECTING;
1370 err = -EINPROGRESS;
1371 }
1372
1373 /* The receive path will handle all communication until we are able to
1374 * enter the connected state. Here we wait for the connection to be
1375 * completed or a notification of an error.
1376 */
1377 timeout = vsk->connect_timeout;
1378 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1379
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001380 while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
Andy Kingd021c342013-02-06 14:23:56 +00001381 if (flags & O_NONBLOCK) {
1382 /* If we're not going to block, we schedule a timeout
1383 * function to generate a timeout on the connection
1384 * attempt, in case the peer doesn't respond in a
1385 * timely manner. We hold on to the socket until the
1386 * timeout fires.
1387 */
1388 sock_hold(sk);
Cong Wang455f05e2018-08-06 11:06:02 -07001389 schedule_delayed_work(&vsk->connect_work, timeout);
Andy Kingd021c342013-02-06 14:23:56 +00001390
1391 /* Skip ahead to preserve error code set above. */
1392 goto out_wait;
1393 }
1394
1395 release_sock(sk);
1396 timeout = schedule_timeout(timeout);
1397 lock_sock(sk);
1398
1399 if (signal_pending(current)) {
1400 err = sock_intr_errno(timeout);
Longpeng(Mike)c7ff9cf2021-06-21 14:26:01 +08001401 sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001402 sock->state = SS_UNCONNECTED;
Peng Tao380feae2017-03-15 09:32:17 +08001403 vsock_transport_cancel_pkt(vsk);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001404 goto out_wait;
Andy Kingd021c342013-02-06 14:23:56 +00001405 } else if (timeout == 0) {
1406 err = -ETIMEDOUT;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001407 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001408 sock->state = SS_UNCONNECTED;
Peng Tao380feae2017-03-15 09:32:17 +08001409 vsock_transport_cancel_pkt(vsk);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001410 goto out_wait;
Andy Kingd021c342013-02-06 14:23:56 +00001411 }
1412
1413 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1414 }
1415
1416 if (sk->sk_err) {
1417 err = -sk->sk_err;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001418 sk->sk_state = TCP_CLOSE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001419 sock->state = SS_UNCONNECTED;
1420 } else {
Andy Kingd021c342013-02-06 14:23:56 +00001421 err = 0;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001422 }
Andy Kingd021c342013-02-06 14:23:56 +00001423
1424out_wait:
1425 finish_wait(sk_sleep(sk), &wait);
1426out:
1427 release_sock(sk);
1428 return err;
Andy Kingd021c342013-02-06 14:23:56 +00001429}
1430
David Howellscdfbabf2017-03-09 08:09:05 +00001431static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
1432 bool kern)
Andy Kingd021c342013-02-06 14:23:56 +00001433{
1434 struct sock *listener;
1435 int err;
1436 struct sock *connected;
1437 struct vsock_sock *vconnected;
1438 long timeout;
1439 DEFINE_WAIT(wait);
1440
1441 err = 0;
1442 listener = sock->sk;
1443
1444 lock_sock(listener);
1445
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001446 if (!sock_type_connectible(sock->type)) {
Andy Kingd021c342013-02-06 14:23:56 +00001447 err = -EOPNOTSUPP;
1448 goto out;
1449 }
1450
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001451 if (listener->sk_state != TCP_LISTEN) {
Andy Kingd021c342013-02-06 14:23:56 +00001452 err = -EINVAL;
1453 goto out;
1454 }
1455
1456 /* Wait for children sockets to appear; these are the new sockets
1457 * created upon connection establishment.
1458 */
Stefano Garzarella7e0afbd2020-05-27 09:56:55 +02001459 timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
Andy Kingd021c342013-02-06 14:23:56 +00001460 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1461
1462 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1463 listener->sk_err == 0) {
1464 release_sock(listener);
1465 timeout = schedule_timeout(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001466 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001467 lock_sock(listener);
1468
1469 if (signal_pending(current)) {
1470 err = sock_intr_errno(timeout);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001471 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001472 } else if (timeout == 0) {
1473 err = -EAGAIN;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001474 goto out;
Andy Kingd021c342013-02-06 14:23:56 +00001475 }
1476
1477 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1478 }
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001479 finish_wait(sk_sleep(listener), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001480
1481 if (listener->sk_err)
1482 err = -listener->sk_err;
1483
1484 if (connected) {
Eric Dumazet7976a112019-11-05 14:11:52 -08001485 sk_acceptq_removed(listener);
Andy Kingd021c342013-02-06 14:23:56 +00001486
Stefan Hajnoczi4192f672016-06-23 16:28:58 +01001487 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
Andy Kingd021c342013-02-06 14:23:56 +00001488 vconnected = vsock_sk(connected);
1489
1490 /* If the listener socket has received an error, then we should
1491 * reject this socket and return. Note that we simply mark the
1492 * socket rejected, drop our reference, and let the cleanup
1493 * function handle the cleanup; the fact that we found it in
1494 * the listener's accept queue guarantees that the cleanup
1495 * function hasn't run yet.
1496 */
1497 if (err) {
1498 vconnected->rejected = true;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001499 } else {
1500 newsock->state = SS_CONNECTED;
1501 sock_graft(connected, newsock);
Andy Kingd021c342013-02-06 14:23:56 +00001502 }
1503
Andy Kingd021c342013-02-06 14:23:56 +00001504 release_sock(connected);
1505 sock_put(connected);
1506 }
1507
Andy Kingd021c342013-02-06 14:23:56 +00001508out:
1509 release_sock(listener);
1510 return err;
1511}
1512
1513static int vsock_listen(struct socket *sock, int backlog)
1514{
1515 int err;
1516 struct sock *sk;
1517 struct vsock_sock *vsk;
1518
1519 sk = sock->sk;
1520
1521 lock_sock(sk);
1522
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001523 if (!sock_type_connectible(sk->sk_type)) {
Andy Kingd021c342013-02-06 14:23:56 +00001524 err = -EOPNOTSUPP;
1525 goto out;
1526 }
1527
1528 if (sock->state != SS_UNCONNECTED) {
1529 err = -EINVAL;
1530 goto out;
1531 }
1532
1533 vsk = vsock_sk(sk);
1534
1535 if (!vsock_addr_bound(&vsk->local_addr)) {
1536 err = -EINVAL;
1537 goto out;
1538 }
1539
1540 sk->sk_max_ack_backlog = backlog;
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001541 sk->sk_state = TCP_LISTEN;
Andy Kingd021c342013-02-06 14:23:56 +00001542
1543 err = 0;
1544
1545out:
1546 release_sock(sk);
1547 return err;
1548}
1549
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +01001550static void vsock_update_buffer_size(struct vsock_sock *vsk,
1551 const struct vsock_transport *transport,
1552 u64 val)
1553{
1554 if (val > vsk->buffer_max_size)
1555 val = vsk->buffer_max_size;
1556
1557 if (val < vsk->buffer_min_size)
1558 val = vsk->buffer_min_size;
1559
1560 if (val != vsk->buffer_size &&
1561 transport && transport->notify_buffer_size)
1562 transport->notify_buffer_size(vsk, &val);
1563
1564 vsk->buffer_size = val;
1565}
1566
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001567static int vsock_connectible_setsockopt(struct socket *sock,
1568 int level,
1569 int optname,
1570 sockptr_t optval,
1571 unsigned int optlen)
Andy Kingd021c342013-02-06 14:23:56 +00001572{
1573 int err;
1574 struct sock *sk;
1575 struct vsock_sock *vsk;
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001576 const struct vsock_transport *transport;
Andy Kingd021c342013-02-06 14:23:56 +00001577 u64 val;
1578
1579 if (level != AF_VSOCK)
1580 return -ENOPROTOOPT;
1581
1582#define COPY_IN(_v) \
1583 do { \
1584 if (optlen < sizeof(_v)) { \
1585 err = -EINVAL; \
1586 goto exit; \
1587 } \
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02001588 if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \
Andy Kingd021c342013-02-06 14:23:56 +00001589 err = -EFAULT; \
1590 goto exit; \
1591 } \
1592 } while (0)
1593
1594 err = 0;
1595 sk = sock->sk;
1596 vsk = vsock_sk(sk);
1597
1598 lock_sock(sk);
1599
Alexander Popovc518ada2021-02-01 11:47:19 +03001600 transport = vsk->transport;
1601
Andy Kingd021c342013-02-06 14:23:56 +00001602 switch (optname) {
1603 case SO_VM_SOCKETS_BUFFER_SIZE:
1604 COPY_IN(val);
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +01001605 vsock_update_buffer_size(vsk, transport, val);
Andy Kingd021c342013-02-06 14:23:56 +00001606 break;
1607
1608 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1609 COPY_IN(val);
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +01001610 vsk->buffer_max_size = val;
1611 vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
Andy Kingd021c342013-02-06 14:23:56 +00001612 break;
1613
1614 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1615 COPY_IN(val);
Stefano Garzarellab9f2b0f2019-11-14 10:57:42 +01001616 vsk->buffer_min_size = val;
1617 vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
Andy Kingd021c342013-02-06 14:23:56 +00001618 break;
1619
Richard Palethorpe4c1e34c2021-10-08 11:00:53 +01001620 case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
1621 case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: {
1622 struct __kernel_sock_timeval tv;
1623
1624 err = sock_copy_user_timeval(&tv, optval, optlen,
1625 optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
1626 if (err)
1627 break;
Andy Kingd021c342013-02-06 14:23:56 +00001628 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1629 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1630 vsk->connect_timeout = tv.tv_sec * HZ +
Richard Palethorpe4c1e34c2021-10-08 11:00:53 +01001631 DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ));
Andy Kingd021c342013-02-06 14:23:56 +00001632 if (vsk->connect_timeout == 0)
1633 vsk->connect_timeout =
1634 VSOCK_DEFAULT_CONNECT_TIMEOUT;
1635
1636 } else {
1637 err = -ERANGE;
1638 }
1639 break;
1640 }
1641
1642 default:
1643 err = -ENOPROTOOPT;
1644 break;
1645 }
1646
1647#undef COPY_IN
1648
1649exit:
1650 release_sock(sk);
1651 return err;
1652}
1653
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001654static int vsock_connectible_getsockopt(struct socket *sock,
1655 int level, int optname,
1656 char __user *optval,
1657 int __user *optlen)
Andy Kingd021c342013-02-06 14:23:56 +00001658{
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001659 struct sock *sk = sock->sk;
1660 struct vsock_sock *vsk = vsock_sk(sk);
1661
1662 union {
1663 u64 val64;
Richard Palethorpe4c1e34c2021-10-08 11:00:53 +01001664 struct old_timeval32 tm32;
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001665 struct __kernel_old_timeval tm;
Richard Palethorpe4c1e34c2021-10-08 11:00:53 +01001666 struct __kernel_sock_timeval stm;
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001667 } v;
1668
1669 int lv = sizeof(v.val64);
Andy Kingd021c342013-02-06 14:23:56 +00001670 int len;
Andy Kingd021c342013-02-06 14:23:56 +00001671
1672 if (level != AF_VSOCK)
1673 return -ENOPROTOOPT;
1674
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001675 if (get_user(len, optlen))
1676 return -EFAULT;
Andy Kingd021c342013-02-06 14:23:56 +00001677
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001678 memset(&v, 0, sizeof(v));
Andy Kingd021c342013-02-06 14:23:56 +00001679
1680 switch (optname) {
1681 case SO_VM_SOCKETS_BUFFER_SIZE:
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001682 v.val64 = vsk->buffer_size;
Andy Kingd021c342013-02-06 14:23:56 +00001683 break;
1684
1685 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001686 v.val64 = vsk->buffer_max_size;
Andy Kingd021c342013-02-06 14:23:56 +00001687 break;
1688
1689 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001690 v.val64 = vsk->buffer_min_size;
Andy Kingd021c342013-02-06 14:23:56 +00001691 break;
1692
Richard Palethorpe4c1e34c2021-10-08 11:00:53 +01001693 case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
1694 case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD:
1695 lv = sock_get_timeout(vsk->connect_timeout, &v,
1696 optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
Andy Kingd021c342013-02-06 14:23:56 +00001697 break;
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001698
Andy Kingd021c342013-02-06 14:23:56 +00001699 default:
1700 return -ENOPROTOOPT;
1701 }
1702
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001703 if (len < lv)
1704 return -EINVAL;
1705 if (len > lv)
1706 len = lv;
1707 if (copy_to_user(optval, &v, len))
Andy Kingd021c342013-02-06 14:23:56 +00001708 return -EFAULT;
1709
Richard Palethorpe685c3f22021-10-08 11:00:52 +01001710 if (put_user(len, optlen))
1711 return -EFAULT;
Andy Kingd021c342013-02-06 14:23:56 +00001712
1713 return 0;
1714}
1715
Arseny Krasnova9e29e52021-06-11 14:09:47 +03001716static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
1717 size_t len)
Andy Kingd021c342013-02-06 14:23:56 +00001718{
1719 struct sock *sk;
1720 struct vsock_sock *vsk;
Stefano Garzarellafe502c42019-11-14 10:57:39 +01001721 const struct vsock_transport *transport;
Andy Kingd021c342013-02-06 14:23:56 +00001722 ssize_t total_written;
1723 long timeout;
1724 int err;
1725 struct vsock_transport_send_notify_data send_data;
WANG Cong499fde62017-05-19 11:21:59 -07001726 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Andy Kingd021c342013-02-06 14:23:56 +00001727
1728 sk = sock->sk;
1729 vsk = vsock_sk(sk);
1730 total_written = 0;
1731 err = 0;
1732
1733 if (msg->msg_flags & MSG_OOB)
1734 return -EOPNOTSUPP;
1735
1736 lock_sock(sk);
1737
Alexander Popovc518ada2021-02-01 11:47:19 +03001738 transport = vsk->transport;
1739
Arseny Krasnov8cb48552021-06-11 14:11:18 +03001740 /* Callers should not provide a destination with connection oriented
1741 * sockets.
1742 */
Andy Kingd021c342013-02-06 14:23:56 +00001743 if (msg->msg_namelen) {
Stefan Hajnoczi3b4477d2017-10-05 16:46:52 -04001744 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
Andy Kingd021c342013-02-06 14:23:56 +00001745 goto out;
1746 }
1747
1748 /* Send data only if both sides are not shutdown in the direction. */
1749 if (sk->sk_shutdown & SEND_SHUTDOWN ||
1750 vsk->peer_shutdown & RCV_SHUTDOWN) {
1751 err = -EPIPE;
1752 goto out;
1753 }
1754
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01001755 if (!transport || sk->sk_state != TCP_ESTABLISHED ||
Andy Kingd021c342013-02-06 14:23:56 +00001756 !vsock_addr_bound(&vsk->local_addr)) {
1757 err = -ENOTCONN;
1758 goto out;
1759 }
1760
1761 if (!vsock_addr_bound(&vsk->remote_addr)) {
1762 err = -EDESTADDRREQ;
1763 goto out;
1764 }
1765
1766 /* Wait for room in the produce queue to enqueue our user's data. */
1767 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1768
1769 err = transport->notify_send_init(vsk, &send_data);
1770 if (err < 0)
1771 goto out;
1772
Andy Kingd021c342013-02-06 14:23:56 +00001773 while (total_written < len) {
1774 ssize_t written;
1775
WANG Cong499fde62017-05-19 11:21:59 -07001776 add_wait_queue(sk_sleep(sk), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001777 while (vsock_stream_has_space(vsk) == 0 &&
1778 sk->sk_err == 0 &&
1779 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1780 !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1781
1782 /* Don't wait for non-blocking sockets. */
1783 if (timeout == 0) {
1784 err = -EAGAIN;
WANG Cong499fde62017-05-19 11:21:59 -07001785 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001786 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001787 }
1788
1789 err = transport->notify_send_pre_block(vsk, &send_data);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001790 if (err < 0) {
WANG Cong499fde62017-05-19 11:21:59 -07001791 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001792 goto out_err;
1793 }
Andy Kingd021c342013-02-06 14:23:56 +00001794
1795 release_sock(sk);
WANG Cong499fde62017-05-19 11:21:59 -07001796 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
Andy Kingd021c342013-02-06 14:23:56 +00001797 lock_sock(sk);
1798 if (signal_pending(current)) {
1799 err = sock_intr_errno(timeout);
WANG Cong499fde62017-05-19 11:21:59 -07001800 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001801 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001802 } else if (timeout == 0) {
1803 err = -EAGAIN;
WANG Cong499fde62017-05-19 11:21:59 -07001804 remove_wait_queue(sk_sleep(sk), &wait);
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001805 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001806 }
Andy Kingd021c342013-02-06 14:23:56 +00001807 }
WANG Cong499fde62017-05-19 11:21:59 -07001808 remove_wait_queue(sk_sleep(sk), &wait);
Andy Kingd021c342013-02-06 14:23:56 +00001809
1810 /* These checks occur both as part of and after the loop
1811 * conditional since we need to check before and after
1812 * sleeping.
1813 */
1814 if (sk->sk_err) {
1815 err = -sk->sk_err;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001816 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001817 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1818 (vsk->peer_shutdown & RCV_SHUTDOWN)) {
1819 err = -EPIPE;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001820 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001821 }
1822
1823 err = transport->notify_send_pre_enqueue(vsk, &send_data);
1824 if (err < 0)
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001825 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001826
1827 /* Note that enqueue will only write as many bytes as are free
1828 * in the produce queue, so we don't need to ensure len is
1829 * smaller than the queue size. It is the caller's
1830 * responsibility to check how many bytes we were able to send.
1831 */
1832
Arseny Krasnovfbe70c42021-06-11 14:10:49 +03001833 if (sk->sk_type == SOCK_SEQPACKET) {
1834 written = transport->seqpacket_enqueue(vsk,
1835 msg, len - total_written);
1836 } else {
1837 written = transport->stream_enqueue(vsk,
1838 msg, len - total_written);
1839 }
Andy Kingd021c342013-02-06 14:23:56 +00001840 if (written < 0) {
1841 err = -ENOMEM;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001842 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001843 }
1844
1845 total_written += written;
1846
1847 err = transport->notify_send_post_enqueue(
1848 vsk, written, &send_data);
1849 if (err < 0)
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001850 goto out_err;
Andy Kingd021c342013-02-06 14:23:56 +00001851
1852 }
1853
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001854out_err:
Arseny Krasnovfbe70c42021-06-11 14:10:49 +03001855 if (total_written > 0) {
1856 /* Return number of written bytes only if:
1857 * 1) SOCK_STREAM socket.
1858 * 2) SOCK_SEQPACKET socket when whole buffer is sent.
1859 */
1860 if (sk->sk_type == SOCK_STREAM || total_written == len)
1861 err = total_written;
1862 }
Andy Kingd021c342013-02-06 14:23:56 +00001863out:
1864 release_sock(sk);
1865 return err;
1866}
1867
Stefano Garzarella0de5b2e2021-06-18 15:35:25 +02001868static int vsock_connectible_wait_data(struct sock *sk,
1869 struct wait_queue_entry *wait,
1870 long timeout,
1871 struct vsock_transport_recv_notify_data *recv_data,
1872 size_t target)
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001873{
1874 const struct vsock_transport *transport;
1875 struct vsock_sock *vsk;
1876 s64 data;
1877 int err;
1878
1879 vsk = vsock_sk(sk);
1880 err = 0;
1881 transport = vsk->transport;
1882
Stefano Garzarellacc971412021-06-18 15:35:24 +02001883 while ((data = vsock_connectible_has_data(vsk)) == 0) {
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001884 prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
1885
1886 if (sk->sk_err != 0 ||
1887 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1888 (vsk->peer_shutdown & SEND_SHUTDOWN)) {
1889 break;
1890 }
1891
1892 /* Don't wait for non-blocking sockets. */
1893 if (timeout == 0) {
1894 err = -EAGAIN;
1895 break;
1896 }
1897
1898 if (recv_data) {
1899 err = transport->notify_recv_pre_block(vsk, target, recv_data);
1900 if (err < 0)
1901 break;
1902 }
1903
1904 release_sock(sk);
1905 timeout = schedule_timeout(timeout);
1906 lock_sock(sk);
1907
1908 if (signal_pending(current)) {
1909 err = sock_intr_errno(timeout);
1910 break;
1911 } else if (timeout == 0) {
1912 err = -EAGAIN;
1913 break;
1914 }
1915 }
1916
1917 finish_wait(sk_sleep(sk), wait);
1918
1919 if (err)
1920 return err;
1921
1922 /* Internal transport error when checking for available
1923 * data. XXX This should be changed to a connection
1924 * reset in a later change.
1925 */
1926 if (data < 0)
1927 return -ENOMEM;
1928
1929 return data;
1930}
1931
Arseny Krasnov19c1b902021-06-11 14:10:21 +03001932static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
1933 size_t len, int flags)
Andy Kingd021c342013-02-06 14:23:56 +00001934{
Andy Kingd021c342013-02-06 14:23:56 +00001935 struct vsock_transport_recv_notify_data recv_data;
Arseny Krasnov19c1b902021-06-11 14:10:21 +03001936 const struct vsock_transport *transport;
1937 struct vsock_sock *vsk;
1938 ssize_t copied;
1939 size_t target;
1940 long timeout;
1941 int err;
Andy Kingd021c342013-02-06 14:23:56 +00001942
1943 DEFINE_WAIT(wait);
1944
Andy Kingd021c342013-02-06 14:23:56 +00001945 vsk = vsock_sk(sk);
Alexander Popovc518ada2021-02-01 11:47:19 +03001946 transport = vsk->transport;
1947
Andy Kingd021c342013-02-06 14:23:56 +00001948 /* We must not copy less than target bytes into the user's buffer
1949 * before returning successfully, so we wait for the consume queue to
1950 * have that much data to consume before dequeueing. Note that this
1951 * makes it impossible to handle cases where target is greater than the
1952 * queue size.
1953 */
1954 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1955 if (target >= transport->stream_rcvhiwat(vsk)) {
1956 err = -ENOMEM;
1957 goto out;
1958 }
1959 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1960 copied = 0;
1961
1962 err = transport->notify_recv_init(vsk, target, &recv_data);
1963 if (err < 0)
1964 goto out;
1965
Andy Kingd021c342013-02-06 14:23:56 +00001966
1967 while (1) {
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001968 ssize_t read;
Andy Kingd021c342013-02-06 14:23:56 +00001969
Stefano Garzarella0de5b2e2021-06-18 15:35:25 +02001970 err = vsock_connectible_wait_data(sk, &wait, timeout,
1971 &recv_data, target);
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001972 if (err <= 0)
1973 break;
Andy Kingd021c342013-02-06 14:23:56 +00001974
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001975 err = transport->notify_recv_pre_dequeue(vsk, target,
1976 &recv_data);
1977 if (err < 0)
1978 break;
Claudio Imbrendaf7f9b5e2016-03-22 17:05:52 +01001979
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001980 read = transport->stream_dequeue(vsk, msg, len - copied, flags);
1981 if (read < 0) {
1982 err = -ENOMEM;
1983 break;
Andy Kingd021c342013-02-06 14:23:56 +00001984 }
Arseny Krasnovb3f7fd52021-06-11 14:10:07 +03001985
1986 copied += read;
1987
1988 err = transport->notify_recv_post_dequeue(vsk, target, read,
1989 !(flags & MSG_PEEK), &recv_data);
1990 if (err < 0)
1991 goto out;
1992
1993 if (read >= target || flags & MSG_PEEK)
1994 break;
1995
1996 target -= read;
Andy Kingd021c342013-02-06 14:23:56 +00001997 }
1998
1999 if (sk->sk_err)
2000 err = -sk->sk_err;
2001 else if (sk->sk_shutdown & RCV_SHUTDOWN)
2002 err = 0;
2003
Ian Campbelldedc58e2016-05-04 14:21:53 +01002004 if (copied > 0)
Andy Kingd021c342013-02-06 14:23:56 +00002005 err = copied;
Andy Kingd021c342013-02-06 14:23:56 +00002006
Andy Kingd021c342013-02-06 14:23:56 +00002007out:
Arseny Krasnov19c1b902021-06-11 14:10:21 +03002008 return err;
2009}
2010
Arseny Krasnov9942c192021-06-11 14:10:34 +03002011static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
2012 size_t len, int flags)
2013{
2014 const struct vsock_transport *transport;
2015 struct vsock_sock *vsk;
Arseny Krasnov8fc92b72021-09-03 15:33:03 +03002016 ssize_t msg_len;
Arseny Krasnov9942c192021-06-11 14:10:34 +03002017 long timeout;
2018 int err = 0;
2019 DEFINE_WAIT(wait);
2020
2021 vsk = vsock_sk(sk);
2022 transport = vsk->transport;
2023
2024 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2025
Stefano Garzarella0de5b2e2021-06-18 15:35:25 +02002026 err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
Arseny Krasnov9942c192021-06-11 14:10:34 +03002027 if (err <= 0)
2028 goto out;
2029
Arseny Krasnov8fc92b72021-09-03 15:33:03 +03002030 msg_len = transport->seqpacket_dequeue(vsk, msg, flags);
Arseny Krasnov9942c192021-06-11 14:10:34 +03002031
Arseny Krasnov8fc92b72021-09-03 15:33:03 +03002032 if (msg_len < 0) {
Arseny Krasnov9942c192021-06-11 14:10:34 +03002033 err = -ENOMEM;
2034 goto out;
2035 }
2036
2037 if (sk->sk_err) {
2038 err = -sk->sk_err;
2039 } else if (sk->sk_shutdown & RCV_SHUTDOWN) {
2040 err = 0;
2041 } else {
2042 /* User sets MSG_TRUNC, so return real length of
2043 * packet.
2044 */
2045 if (flags & MSG_TRUNC)
Arseny Krasnov8fc92b72021-09-03 15:33:03 +03002046 err = msg_len;
Arseny Krasnov9942c192021-06-11 14:10:34 +03002047 else
2048 err = len - msg_data_left(msg);
2049
2050 /* Always set MSG_TRUNC if real length of packet is
2051 * bigger than user's buffer.
2052 */
Arseny Krasnov8fc92b72021-09-03 15:33:03 +03002053 if (msg_len > len)
Arseny Krasnov9942c192021-06-11 14:10:34 +03002054 msg->msg_flags |= MSG_TRUNC;
2055 }
2056
2057out:
2058 return err;
2059}
2060
Arseny Krasnov19c1b902021-06-11 14:10:21 +03002061static int
2062vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2063 int flags)
2064{
2065 struct sock *sk;
2066 struct vsock_sock *vsk;
2067 const struct vsock_transport *transport;
2068 int err;
2069
2070 DEFINE_WAIT(wait);
2071
2072 sk = sock->sk;
2073 vsk = vsock_sk(sk);
2074 err = 0;
2075
2076 lock_sock(sk);
2077
2078 transport = vsk->transport;
2079
2080 if (!transport || sk->sk_state != TCP_ESTABLISHED) {
2081 /* Recvmsg is supposed to return 0 if a peer performs an
2082 * orderly shutdown. Differentiate between that case and when a
2083 * peer has not connected or a local shutdown occurred with the
2084 * SOCK_DONE flag.
2085 */
2086 if (sock_flag(sk, SOCK_DONE))
2087 err = 0;
2088 else
2089 err = -ENOTCONN;
2090
2091 goto out;
2092 }
2093
2094 if (flags & MSG_OOB) {
2095 err = -EOPNOTSUPP;
2096 goto out;
2097 }
2098
2099 /* We don't check peer_shutdown flag here since peer may actually shut
2100 * down, but there can be data in the queue that a local socket can
2101 * receive.
2102 */
2103 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2104 err = 0;
2105 goto out;
2106 }
2107
2108 /* It is valid on Linux to pass in a zero-length receive buffer. This
2109 * is not an error. We may as well bail out now.
2110 */
2111 if (!len) {
2112 err = 0;
2113 goto out;
2114 }
2115
Arseny Krasnov9942c192021-06-11 14:10:34 +03002116 if (sk->sk_type == SOCK_STREAM)
2117 err = __vsock_stream_recvmsg(sk, msg, len, flags);
2118 else
2119 err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);
Arseny Krasnov19c1b902021-06-11 14:10:21 +03002120
2121out:
Andy Kingd021c342013-02-06 14:23:56 +00002122 release_sock(sk);
2123 return err;
2124}
2125
2126static const struct proto_ops vsock_stream_ops = {
2127 .family = PF_VSOCK,
2128 .owner = THIS_MODULE,
2129 .release = vsock_release,
2130 .bind = vsock_bind,
Arseny Krasnova9e29e52021-06-11 14:09:47 +03002131 .connect = vsock_connect,
Andy Kingd021c342013-02-06 14:23:56 +00002132 .socketpair = sock_no_socketpair,
2133 .accept = vsock_accept,
2134 .getname = vsock_getname,
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07002135 .poll = vsock_poll,
Andy Kingd021c342013-02-06 14:23:56 +00002136 .ioctl = sock_no_ioctl,
2137 .listen = vsock_listen,
2138 .shutdown = vsock_shutdown,
Arseny Krasnova9e29e52021-06-11 14:09:47 +03002139 .setsockopt = vsock_connectible_setsockopt,
2140 .getsockopt = vsock_connectible_getsockopt,
2141 .sendmsg = vsock_connectible_sendmsg,
2142 .recvmsg = vsock_connectible_recvmsg,
Andy Kingd021c342013-02-06 14:23:56 +00002143 .mmap = sock_no_mmap,
2144 .sendpage = sock_no_sendpage,
2145};
2146
Arseny Krasnov0798e782021-06-11 14:11:04 +03002147static const struct proto_ops vsock_seqpacket_ops = {
2148 .family = PF_VSOCK,
2149 .owner = THIS_MODULE,
2150 .release = vsock_release,
2151 .bind = vsock_bind,
2152 .connect = vsock_connect,
2153 .socketpair = sock_no_socketpair,
2154 .accept = vsock_accept,
2155 .getname = vsock_getname,
2156 .poll = vsock_poll,
2157 .ioctl = sock_no_ioctl,
2158 .listen = vsock_listen,
2159 .shutdown = vsock_shutdown,
2160 .setsockopt = vsock_connectible_setsockopt,
2161 .getsockopt = vsock_connectible_getsockopt,
2162 .sendmsg = vsock_connectible_sendmsg,
2163 .recvmsg = vsock_connectible_recvmsg,
2164 .mmap = sock_no_mmap,
2165 .sendpage = sock_no_sendpage,
2166};
2167
Andy Kingd021c342013-02-06 14:23:56 +00002168static int vsock_create(struct net *net, struct socket *sock,
2169 int protocol, int kern)
2170{
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002171 struct vsock_sock *vsk;
Stefano Garzarella55f3e142019-11-14 10:57:44 +01002172 struct sock *sk;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002173 int ret;
Stefano Garzarella55f3e142019-11-14 10:57:44 +01002174
Andy Kingd021c342013-02-06 14:23:56 +00002175 if (!sock)
2176 return -EINVAL;
2177
Andy King6cf1c5f2013-02-18 06:04:13 +00002178 if (protocol && protocol != PF_VSOCK)
Andy Kingd021c342013-02-06 14:23:56 +00002179 return -EPROTONOSUPPORT;
2180
2181 switch (sock->type) {
2182 case SOCK_DGRAM:
2183 sock->ops = &vsock_dgram_ops;
2184 break;
2185 case SOCK_STREAM:
2186 sock->ops = &vsock_stream_ops;
2187 break;
Arseny Krasnov0798e782021-06-11 14:11:04 +03002188 case SOCK_SEQPACKET:
2189 sock->ops = &vsock_seqpacket_ops;
2190 break;
Andy Kingd021c342013-02-06 14:23:56 +00002191 default:
2192 return -ESOCKTNOSUPPORT;
2193 }
2194
2195 sock->state = SS_UNCONNECTED;
2196
Stefano Garzarella55f3e142019-11-14 10:57:44 +01002197 sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
2198 if (!sk)
2199 return -ENOMEM;
2200
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002201 vsk = vsock_sk(sk);
2202
2203 if (sock->type == SOCK_DGRAM) {
2204 ret = vsock_assign_transport(vsk, NULL);
2205 if (ret < 0) {
2206 sock_put(sk);
2207 return ret;
2208 }
2209 }
2210
2211 vsock_insert_unbound(vsk);
Stefano Garzarella55f3e142019-11-14 10:57:44 +01002212
2213 return 0;
Andy Kingd021c342013-02-06 14:23:56 +00002214}
2215
2216static const struct net_proto_family vsock_family_ops = {
2217 .family = AF_VSOCK,
2218 .create = vsock_create,
2219 .owner = THIS_MODULE,
2220};
2221
2222static long vsock_dev_do_ioctl(struct file *filp,
2223 unsigned int cmd, void __user *ptr)
2224{
2225 u32 __user *p = ptr;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002226 u32 cid = VMADDR_CID_ANY;
Andy Kingd021c342013-02-06 14:23:56 +00002227 int retval = 0;
2228
2229 switch (cmd) {
2230 case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002231 /* To be compatible with the VMCI behavior, we prioritize the
2232 * guest CID instead of well-know host CID (VMADDR_CID_HOST).
2233 */
2234 if (transport_g2h)
2235 cid = transport_g2h->get_local_cid();
2236 else if (transport_h2g)
2237 cid = transport_h2g->get_local_cid();
2238
2239 if (put_user(cid, p) != 0)
Andy Kingd021c342013-02-06 14:23:56 +00002240 retval = -EFAULT;
2241 break;
2242
2243 default:
Colin Ian Kingc3e448c2020-10-27 09:09:42 +00002244 retval = -ENOIOCTLCMD;
Andy Kingd021c342013-02-06 14:23:56 +00002245 }
2246
2247 return retval;
2248}
2249
2250static long vsock_dev_ioctl(struct file *filp,
2251 unsigned int cmd, unsigned long arg)
2252{
2253 return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
2254}
2255
2256#ifdef CONFIG_COMPAT
2257static long vsock_dev_compat_ioctl(struct file *filp,
2258 unsigned int cmd, unsigned long arg)
2259{
2260 return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
2261}
2262#endif
2263
2264static const struct file_operations vsock_device_ops = {
2265 .owner = THIS_MODULE,
2266 .unlocked_ioctl = vsock_dev_ioctl,
2267#ifdef CONFIG_COMPAT
2268 .compat_ioctl = vsock_dev_compat_ioctl,
2269#endif
2270 .open = nonseekable_open,
2271};
2272
2273static struct miscdevice vsock_device = {
2274 .name = "vsock",
Andy Kingd021c342013-02-06 14:23:56 +00002275 .fops = &vsock_device_ops,
2276};
2277
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002278static int __init vsock_init(void)
Andy Kingd021c342013-02-06 14:23:56 +00002279{
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002280 int err = 0;
Andy King2c4a3362014-05-01 15:20:43 -07002281
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002282 vsock_init_tables();
Andy King2c4a3362014-05-01 15:20:43 -07002283
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002284 vsock_proto.owner = THIS_MODULE;
Asias He6ad0b2f2013-04-23 20:33:52 +00002285 vsock_device.minor = MISC_DYNAMIC_MINOR;
Andy Kingd021c342013-02-06 14:23:56 +00002286 err = misc_register(&vsock_device);
2287 if (err) {
2288 pr_err("Failed to register misc device\n");
Gao fengf6a835b2015-10-18 23:35:56 +08002289 goto err_reset_transport;
Andy Kingd021c342013-02-06 14:23:56 +00002290 }
2291
2292 err = proto_register(&vsock_proto, 1); /* we want our slab */
2293 if (err) {
2294 pr_err("Cannot register vsock protocol\n");
Gao fengf6a835b2015-10-18 23:35:56 +08002295 goto err_deregister_misc;
Andy Kingd021c342013-02-06 14:23:56 +00002296 }
2297
2298 err = sock_register(&vsock_family_ops);
2299 if (err) {
2300 pr_err("could not register af_vsock (%d) address family: %d\n",
2301 AF_VSOCK, err);
2302 goto err_unregister_proto;
2303 }
2304
2305 return 0;
2306
2307err_unregister_proto:
2308 proto_unregister(&vsock_proto);
Gao fengf6a835b2015-10-18 23:35:56 +08002309err_deregister_misc:
Andy Kingd021c342013-02-06 14:23:56 +00002310 misc_deregister(&vsock_device);
Gao fengf6a835b2015-10-18 23:35:56 +08002311err_reset_transport:
Andy Kingd021c342013-02-06 14:23:56 +00002312 return err;
2313}
Andy Kingd021c342013-02-06 14:23:56 +00002314
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002315static void __exit vsock_exit(void)
Andy Kingd021c342013-02-06 14:23:56 +00002316{
Andy Kingd021c342013-02-06 14:23:56 +00002317 misc_deregister(&vsock_device);
2318 sock_unregister(AF_VSOCK);
2319 proto_unregister(&vsock_proto);
Andy Kingd021c342013-02-06 14:23:56 +00002320}
Andy Kingd021c342013-02-06 14:23:56 +00002321
Stefano Garzarelladaabfbc2019-11-14 10:57:41 +01002322const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
Stefan Hajnoczi0b01aeb2016-07-28 15:36:30 +01002323{
Stefano Garzarelladaabfbc2019-11-14 10:57:41 +01002324 return vsk->transport;
Stefan Hajnoczi0b01aeb2016-07-28 15:36:30 +01002325}
2326EXPORT_SYMBOL_GPL(vsock_core_get_transport);
2327
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002328int vsock_core_register(const struct vsock_transport *t, int features)
Stefan Hajnoczi05e489b2018-04-17 14:25:58 +08002329{
Stefano Garzarella0e121902019-12-10 11:43:04 +01002330 const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002331 int err = mutex_lock_interruptible(&vsock_register_mutex);
Stefan Hajnoczi05e489b2018-04-17 14:25:58 +08002332
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002333 if (err)
2334 return err;
2335
2336 t_h2g = transport_h2g;
2337 t_g2h = transport_g2h;
2338 t_dgram = transport_dgram;
Stefano Garzarella0e121902019-12-10 11:43:04 +01002339 t_local = transport_local;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002340
2341 if (features & VSOCK_TRANSPORT_F_H2G) {
2342 if (t_h2g) {
2343 err = -EBUSY;
2344 goto err_busy;
2345 }
2346 t_h2g = t;
2347 }
2348
2349 if (features & VSOCK_TRANSPORT_F_G2H) {
2350 if (t_g2h) {
2351 err = -EBUSY;
2352 goto err_busy;
2353 }
2354 t_g2h = t;
2355 }
2356
2357 if (features & VSOCK_TRANSPORT_F_DGRAM) {
2358 if (t_dgram) {
2359 err = -EBUSY;
2360 goto err_busy;
2361 }
2362 t_dgram = t;
2363 }
2364
Stefano Garzarella0e121902019-12-10 11:43:04 +01002365 if (features & VSOCK_TRANSPORT_F_LOCAL) {
2366 if (t_local) {
2367 err = -EBUSY;
2368 goto err_busy;
2369 }
2370 t_local = t;
2371 }
2372
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002373 transport_h2g = t_h2g;
2374 transport_g2h = t_g2h;
2375 transport_dgram = t_dgram;
Stefano Garzarella0e121902019-12-10 11:43:04 +01002376 transport_local = t_local;
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002377
2378err_busy:
2379 mutex_unlock(&vsock_register_mutex);
2380 return err;
2381}
2382EXPORT_SYMBOL_GPL(vsock_core_register);
2383
2384void vsock_core_unregister(const struct vsock_transport *t)
2385{
2386 mutex_lock(&vsock_register_mutex);
2387
2388 if (transport_h2g == t)
2389 transport_h2g = NULL;
2390
2391 if (transport_g2h == t)
2392 transport_g2h = NULL;
2393
2394 if (transport_dgram == t)
2395 transport_dgram = NULL;
2396
Stefano Garzarella0e121902019-12-10 11:43:04 +01002397 if (transport_local == t)
2398 transport_local = NULL;
2399
Stefano Garzarellac0cfa2d2019-11-14 10:57:46 +01002400 mutex_unlock(&vsock_register_mutex);
2401}
2402EXPORT_SYMBOL_GPL(vsock_core_unregister);
2403
2404module_init(vsock_init);
Stefan Hajnoczi05e489b2018-04-17 14:25:58 +08002405module_exit(vsock_exit);
Cong Wangc1eef222017-10-24 15:30:37 -07002406
Andy Kingd021c342013-02-06 14:23:56 +00002407MODULE_AUTHOR("VMware, Inc.");
2408MODULE_DESCRIPTION("VMware Virtual Socket Family");
Jorgen Hansen1190cfd2016-09-26 23:59:53 -07002409MODULE_VERSION("1.0.2.0-k");
Andy Kingd021c342013-02-06 14:23:56 +00002410MODULE_LICENSE("GPL v2");