// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko EiBfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of sockets being hashed (this is for
 *					unix_gc() performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *	     Alexey Kuznetsov	:	Full scale SMP. Lot of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
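
/*
 * Minimal userspace sketch of an abstract-namespace bind (illustration only,
 * not part of this file): the name starts with a 0 byte and the address
 * length covers only the name bytes actually used:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */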

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
#include <linux/file.h>

#include "scm.h"

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;


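/* Unbound sockets live in the upper half of unix_socket_table, in a bucket
 * derived from the struct sock pointer itself.
 */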
static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock
 *    each socket state is protected by a separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with a zero byte, it should be NUL
 *		  terminated (an FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	*hashp = 0;

	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (eg, /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when
 * the association to the server socket is dissolved or after a wake
 * up was relayed.
 */

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

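/* Register sk on other's peer_wait queue so dgram wake-ups can be relayed.
 * Returns 1 if a new relay link was set up, 0 if one was already in place.
 */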
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT |
				   EPOLLWRNORM |
				   EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

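/* A non-listening unix socket counts as writable while queued skbs consume at
 * most a quarter of its send buffer (wmem_alloc * 4 <= sndbuf).
 */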
static int unix_writable(const struct sock *sk)
{
	return sk->sk_state != TCP_LISTEN &&
	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us
 * to do flow control based only on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		 atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What the above comment does talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->iolock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->iolock);

	return 0;
}


static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name =		"UNIX",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct unix_sock),
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

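/* Bind an as-yet unbound socket to an automatically chosen abstract name of
 * the form "\0XXXXX" (five hex digits), retrying while the chosen name
 * collides with an existing socket.
 */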
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	smp_store_release(&u->addr, addr);
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}

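/* Resolve a peer address to its socket: filesystem names go through a path
 * lookup plus an inode match, abstract names go through the name hash table.
 */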
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;
	struct path path = { };

	err = -EINVAL;
	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
	    sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (sun_path[0]) {
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			goto out;
		}
	}

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_put;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	refcount_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	smp_store_release(&u->addr, addr);
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->bindlock);
out_put:
	if (err)
		path_put(&path);
out:
	return err;
}

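/* Lock two unix sockets in a fixed (address) order so that concurrent
 * double-lockers cannot deadlock; a NULL or identical second socket degrades
 * to a single lock.
 */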
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we were to allocate them only after the state is locked,
	   we would have to recheck it all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL, 0);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/*  Latch our state.

	    This is a tricky place. We need to grab our state lock and cannot
	    drop the lock on the peer. It is dangerous because a deadlock is
	    possible. The connect-to-self case and simultaneous
	    connect attempts are eliminated by checking the socket
	    state. other is TCP_LISTEN; if sk is TCP_LISTEN we
	    check this before attempting to grab the lock.

	    Well, and we have to recheck the state after the socket is locked.
	 */
1290 st = sk->sk_state;
1291
1292 switch (st) {
1293 case TCP_CLOSE:
1294 /* This is ok... continue with connect */
1295 break;
1296 case TCP_ESTABLISHED:
1297 /* Socket is already connected */
1298 err = -EISCONN;
1299 goto out_unlock;
1300 default:
1301 err = -EINVAL;
1302 goto out_unlock;
1303 }
1304
David S. Miller1c92b4e2007-05-31 13:24:26 -07001305 unix_state_lock_nested(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306
1307 if (sk->sk_state != st) {
David S. Miller1c92b4e2007-05-31 13:24:26 -07001308 unix_state_unlock(sk);
1309 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 sock_put(other);
1311 goto restart;
1312 }
1313
David S. Miller3610cda2011-01-05 15:38:53 -08001314 err = security_unix_stream_connect(sk, other, newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 if (err) {
David S. Miller1c92b4e2007-05-31 13:24:26 -07001316 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 goto out_unlock;
1318 }
1319
1320	/* The way is open! Quickly set all the necessary fields... */
1321
1322 sock_hold(sk);
1323 unix_peer(newsk) = sk;
1324 newsk->sk_state = TCP_ESTABLISHED;
1325 newsk->sk_type = sk->sk_type;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001326 init_peercred(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 newu = unix_sk(newsk);
Eric Dumazeteaefd1102011-02-18 03:26:36 +00001328 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 otheru = unix_sk(other);
1330
Al Viroae3b5642019-02-15 20:09:35 +00001331 /* copy address information from listening to new sock
1332 *
1333 * The contents of *(otheru->addr) and otheru->path
1334 * are seen fully set up here, since we have found
1335 * otheru in hash under unix_table_lock. Insertion
1336 * into the hash chain we'd found it in had been done
1337 * in an earlier critical area protected by unix_table_lock,
1338 * the same one where we'd set *(otheru->addr) contents,
1339 * as well as otheru->path and otheru->addr itself.
1340 *
1341 * Using smp_store_release() here to set newu->addr
1342 * is enough to make those stores, as well as stores
1343 * to newu->path visible to anyone who gets newu->addr
1344	 * by smp_load_acquire(). IOW, the same guarantees
1345 * as for unix_sock instances bound in unix_bind() or
1346 * in unix_autobind().
1347 */
Al Viro40ffe672012-03-14 21:54:32 -04001348 if (otheru->path.dentry) {
1349 path_get(&otheru->path);
1350 newu->path = otheru->path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 }
Al Viroae3b5642019-02-15 20:09:35 +00001352 refcount_inc(&otheru->addr->refcnt);
1353 smp_store_release(&newu->addr, otheru->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
1355 /* Set credentials */
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001356 copy_peercred(sk, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 sock->state = SS_CONNECTED;
1359 sk->sk_state = TCP_ESTABLISHED;
Benjamin LaHaise830a1e52005-12-13 23:22:32 -08001360 sock_hold(newsk);
1361
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001362 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
Benjamin LaHaise830a1e52005-12-13 23:22:32 -08001363 unix_peer(sk) = newsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
David S. Miller1c92b4e2007-05-31 13:24:26 -07001365 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
1367	/* take ten and send info to listening sock */
1368 spin_lock(&other->sk_receive_queue.lock);
1369 __skb_queue_tail(&other->sk_receive_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 spin_unlock(&other->sk_receive_queue.lock);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001371 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001372 other->sk_data_ready(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 sock_put(other);
1374 return 0;
1375
1376out_unlock:
1377 if (other)
David S. Miller1c92b4e2007-05-31 13:24:26 -07001378 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
1380out:
Wei Yongjun40d44442009-02-25 00:32:45 +00001381 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 if (newsk)
1383 unix_release_sock(newsk, 0);
1384 if (other)
1385 sock_put(other);
1386 return err;
1387}
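/*
 * Illustrative userspace sketch (not part of this file): the connect(2)
 * call below is what drives unix_stream_connect() above.  With nothing
 * listening it fails with ECONNREFUSED; with O_NONBLOCK and a full
 * listen backlog it can fail with EAGAIN, matching the code above.
 * The path "/tmp/example.sock" is made up and error handling is
 * abbreviated.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		strncpy(addr.sun_path, "/tmp/example.sock",
 *			sizeof(addr.sun_path) - 1);
 *		// ECONNREFUSED if nothing listens on the path; EAGAIN is
 *		// possible for nonblocking sockets while the backlog is full.
 *		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *			perror("connect");
 *		close(fd);
 *		return 0;
 *	}
 */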
1388
1389static int unix_socketpair(struct socket *socka, struct socket *sockb)
1390{
Jianjun Konge27dfce2008-11-01 21:38:31 -07001391 struct sock *ska = socka->sk, *skb = sockb->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
1393 /* Join our sockets back to back */
1394 sock_hold(ska);
1395 sock_hold(skb);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001396 unix_peer(ska) = skb;
1397 unix_peer(skb) = ska;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001398 init_peercred(ska);
1399 init_peercred(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
1401 if (ska->sk_type != SOCK_DGRAM) {
1402 ska->sk_state = TCP_ESTABLISHED;
1403 skb->sk_state = TCP_ESTABLISHED;
1404 socka->state = SS_CONNECTED;
1405 sockb->state = SS_CONNECTED;
1406 }
1407 return 0;
1408}
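/*
 * Illustrative userspace sketch (not part of this file): socketpair(2)
 * is the caller of unix_socketpair() above; the kernel joins the two
 * sockets back to back, so no bind/listen/connect is needed.  Error
 * handling is abbreviated.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		char buf[16];
 *
 *		if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
 *			return 1;
 *		// Both ends are already connected and established.
 *		write(fds[0], "ping", 4);
 *		read(fds[1], buf, sizeof(buf));
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */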
1409
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001410static void unix_sock_inherit_flags(const struct socket *old,
1411 struct socket *new)
1412{
1413 if (test_bit(SOCK_PASSCRED, &old->flags))
1414 set_bit(SOCK_PASSCRED, &new->flags);
1415 if (test_bit(SOCK_PASSSEC, &old->flags))
1416 set_bit(SOCK_PASSSEC, &new->flags);
1417}
1418
David Howellscdfbabf2017-03-09 08:09:05 +00001419static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1420 bool kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421{
1422 struct sock *sk = sock->sk;
1423 struct sock *tsk;
1424 struct sk_buff *skb;
1425 int err;
1426
1427 err = -EOPNOTSUPP;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001428 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 goto out;
1430
1431 err = -EINVAL;
1432 if (sk->sk_state != TCP_LISTEN)
1433 goto out;
1434
1435	/* If the socket state is TCP_LISTEN it cannot change (for now...),
1436	 * so no locks are necessary.
1437	 */
1438
1439 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1440 if (!skb) {
1441 /* This means receive shutdown. */
1442 if (err == 0)
1443 err = -EINVAL;
1444 goto out;
1445 }
1446
1447 tsk = skb->sk;
1448 skb_free_datagram(sk, skb);
1449 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1450
1451 /* attach accepted sock to socket */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001452 unix_state_lock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 newsock->state = SS_CONNECTED;
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001454 unix_sock_inherit_flags(sock, newsock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 sock_graft(tsk, newsock);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001456 unix_state_unlock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 return 0;
1458
1459out:
1460 return err;
1461}
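/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * listener exercising unix_accept() above, which simply dequeues a sock
 * that unix_stream_connect() already queued on the listening socket.
 * The path "/tmp/example.sock" is made up and error handling is
 * abbreviated.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *		int lfd = socket(AF_UNIX, SOCK_STREAM, 0), cfd;
 *
 *		strncpy(addr.sun_path, "/tmp/example.sock",
 *			sizeof(addr.sun_path) - 1);
 *		unlink(addr.sun_path);	// a stale path would make bind fail
 *		if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
 *		    listen(lfd, 5) < 0) {
 *			perror("bind/listen");
 *			return 1;
 *		}
 *		cfd = accept(lfd, NULL, NULL);	// blocks until a client connects
 *		if (cfd >= 0)
 *			close(cfd);
 *		close(lfd);
 *		return 0;
 *	}
 */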
1462
1463
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +01001464static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465{
1466 struct sock *sk = sock->sk;
Al Viroae3b5642019-02-15 20:09:35 +00001467 struct unix_address *addr;
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001468 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 int err = 0;
1470
1471 if (peer) {
1472 sk = unix_peer_get(sk);
1473
1474 err = -ENOTCONN;
1475 if (!sk)
1476 goto out;
1477 err = 0;
1478 } else {
1479 sock_hold(sk);
1480 }
1481
Al Viroae3b5642019-02-15 20:09:35 +00001482 addr = smp_load_acquire(&unix_sk(sk)->addr);
1483 if (!addr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 sunaddr->sun_family = AF_UNIX;
1485 sunaddr->sun_path[0] = 0;
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +01001486 err = sizeof(short);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 } else {
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +01001488 err = addr->len;
1489 memcpy(sunaddr, addr->name, addr->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 sock_put(sk);
1492out:
1493 return err;
1494}
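/*
 * Illustrative userspace sketch (not part of this file): getsockname(2)
 * lands in unix_getname() above.  For a socket that was never bound the
 * kernel reports only the address family, so the returned length is
 * just 2 bytes.  Error handling is abbreviated.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_un addr;
 *		socklen_t len = sizeof(addr);
 *		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *		// Unbound socket: only sun_family comes back, len == 2.
 *		if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
 *			printf("family=%d len=%u\n", addr.sun_family, len);
 *		close(fd);
 *		return 0;
 *	}
 */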
1495
David S. Millerf78a5fd2011-09-16 19:34:00 -04001496static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001497{
1498 int err = 0;
Eric Dumazet16e57262011-09-19 05:52:27 +00001499
David S. Millerf78a5fd2011-09-16 19:34:00 -04001500 UNIXCB(skb).pid = get_pid(scm->pid);
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001501 UNIXCB(skb).uid = scm->creds.uid;
1502 UNIXCB(skb).gid = scm->creds.gid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001503 UNIXCB(skb).fp = NULL;
Stephen Smalley37a9a8d2015-06-10 08:44:59 -04001504 unix_get_secdata(scm, skb);
Eric W. Biederman7361c362010-06-13 03:34:33 +00001505 if (scm->fp && send_fds)
1506 err = unix_attach_fds(scm, skb);
1507
1508 skb->destructor = unix_destruct_scm;
1509 return err;
1510}
1511
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001512static bool unix_passcred_enabled(const struct socket *sock,
1513 const struct sock *other)
1514{
1515 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1516 !other->sk_socket ||
1517 test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1518}
1519
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520/*
Eric Dumazet16e57262011-09-19 05:52:27 +00001521 * Some apps rely on write() giving SCM_CREDENTIALS.
1522 * We include credentials if source or destination socket
1523 * asserted SOCK_PASSCRED.
1524 */
1525static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1526 const struct sock *other)
1527{
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001528 if (UNIXCB(skb).pid)
Eric Dumazet16e57262011-09-19 05:52:27 +00001529 return;
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001530 if (unix_passcred_enabled(sock, other)) {
Eric Dumazet16e57262011-09-19 05:52:27 +00001531 UNIXCB(skb).pid = get_pid(task_tgid(current));
David S. Miller6e0895c2013-04-22 20:32:51 -04001532 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
Eric Dumazet16e57262011-09-19 05:52:27 +00001533 }
1534}
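/*
 * Illustrative userspace sketch (not part of this file): with
 * SO_PASSCRED set on the receiving socket, maybe_add_creds() above
 * attaches the sender's pid/uid/gid to each queued skb, delivered to
 * userspace as an SCM_CREDENTIALS control message.  Glibc needs
 * _GNU_SOURCE for struct ucred; error handling is abbreviated.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2], on = 1;
 *		char data[16], cbuf[CMSG_SPACE(sizeof(struct ucred))];
 *		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		socketpair(AF_UNIX, SOCK_DGRAM, 0, fds);
 *		setsockopt(fds[1], SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *		send(fds[0], "hi", 2, 0);
 *		recvmsg(fds[1], &msg, 0);
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_CREDENTIALS) {
 *				struct ucred uc;
 *
 *				memcpy(&uc, CMSG_DATA(cmsg), sizeof(uc));
 *				printf("pid=%d uid=%d gid=%d\n", (int)uc.pid,
 *				       (int)uc.uid, (int)uc.gid);
 *			}
 *		}
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */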
1535
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001536static int maybe_init_creds(struct scm_cookie *scm,
1537 struct socket *socket,
1538 const struct sock *other)
1539{
1540 int err;
1541 struct msghdr msg = { .msg_controllen = 0 };
1542
1543 err = scm_send(socket, &msg, scm, false);
1544 if (err)
1545 return err;
1546
1547 if (unix_passcred_enabled(socket, other)) {
1548 scm->pid = get_pid(task_tgid(current));
1549 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1550 }
1551 return err;
1552}
1553
1554static bool unix_skb_scm_eq(struct sk_buff *skb,
1555 struct scm_cookie *scm)
1556{
1557 const struct unix_skb_parms *u = &UNIXCB(skb);
1558
1559 return u->pid == scm->pid &&
1560 uid_eq(u->uid, scm->creds.uid) &&
1561 gid_eq(u->gid, scm->creds.gid) &&
1562 unix_secdata_eq(scm, skb);
1563}
1564
Eric Dumazet16e57262011-09-19 05:52:27 +00001565/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 * Send AF_UNIX data.
1567 */
1568
Ying Xue1b784142015-03-02 15:37:48 +08001569static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1570 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001573 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 struct unix_sock *u = unix_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01001575 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 struct sock *other = NULL;
1577	int namelen = 0; /* silence a spurious GCC "uninitialized" warning */
1578 int err;
Eric Dumazet95c96172012-04-15 05:58:06 +00001579 unsigned int hash;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001580 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 long timeo;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001582 struct scm_cookie scm;
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001583 int data_len = 0;
Rainer Weikusat7d267272015-11-20 22:07:23 +00001584 int sk_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585
dann frazier5f23b732008-11-26 15:32:27 -08001586 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001587 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 if (err < 0)
1589 return err;
1590
1591 err = -EOPNOTSUPP;
1592 if (msg->msg_flags&MSG_OOB)
1593 goto out;
1594
1595 if (msg->msg_namelen) {
1596 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1597 if (err < 0)
1598 goto out;
1599 namelen = err;
1600 } else {
1601 sunaddr = NULL;
1602 err = -ENOTCONN;
1603 other = unix_peer_get(sk);
1604 if (!other)
1605 goto out;
1606 }
1607
Joe Perchesf64f9e72009-11-29 16:55:45 -08001608 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1609 && (err = unix_autobind(sock)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 goto out;
1611
1612 err = -EMSGSIZE;
1613 if (len > sk->sk_sndbuf - 32)
1614 goto out;
1615
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001616 if (len > SKB_MAX_ALLOC) {
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001617 data_len = min_t(size_t,
1618 len - SKB_MAX_ALLOC,
1619 MAX_SKB_FRAGS * PAGE_SIZE);
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001620 data_len = PAGE_ALIGN(data_len);
1621
1622 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1623 }
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001624
1625 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001626 msg->msg_flags & MSG_DONTWAIT, &err,
1627 PAGE_ALLOC_COSTLY_ORDER);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001628 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 goto out;
1630
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001631 err = unix_scm_to_skb(&scm, skb, true);
Eric Dumazet25888e32010-11-25 04:11:39 +00001632 if (err < 0)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001633 goto out_free;
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001634
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001635 skb_put(skb, len - data_len);
1636 skb->data_len = data_len;
1637 skb->len = len;
Al Viroc0371da2014-11-24 10:42:55 -05001638 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 if (err)
1640 goto out_free;
1641
1642 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1643
1644restart:
1645 if (!other) {
1646 err = -ECONNRESET;
1647 if (sunaddr == NULL)
1648 goto out_free;
1649
Denis V. Lunev097e66c2007-11-19 22:29:30 -08001650 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 hash, &err);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001652 if (other == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 goto out_free;
1654 }
1655
Alban Crequyd6ae3ba2011-01-18 06:39:15 +00001656 if (sk_filter(other, skb) < 0) {
1657 /* Toss the packet but do not return any error to the sender */
1658 err = len;
1659 goto out_free;
1660 }
1661
Rainer Weikusat7d267272015-11-20 22:07:23 +00001662 sk_locked = 0;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001663 unix_state_lock(other);
Rainer Weikusat7d267272015-11-20 22:07:23 +00001664restart_locked:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 err = -EPERM;
1666 if (!unix_may_send(sk, other))
1667 goto out_unlock;
1668
Rainer Weikusat7d267272015-11-20 22:07:23 +00001669 if (unlikely(sock_flag(other, SOCK_DEAD))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 /*
1671	 * Check with 1003.1g - what should
1672	 * a datagram error look like here?
1673 */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001674 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 sock_put(other);
1676
Rainer Weikusat7d267272015-11-20 22:07:23 +00001677 if (!sk_locked)
1678 unix_state_lock(sk);
1679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 if (unix_peer(sk) == other) {
Jianjun Konge27dfce2008-11-01 21:38:31 -07001682 unix_peer(sk) = NULL;
Rainer Weikusat7d267272015-11-20 22:07:23 +00001683 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1684
David S. Miller1c92b4e2007-05-31 13:24:26 -07001685 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
1687 unix_dgram_disconnected(sk, other);
1688 sock_put(other);
1689 err = -ECONNREFUSED;
1690 } else {
David S. Miller1c92b4e2007-05-31 13:24:26 -07001691 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 }
1693
1694 other = NULL;
1695 if (err)
1696 goto out_free;
1697 goto restart;
1698 }
1699
1700 err = -EPIPE;
1701 if (other->sk_shutdown & RCV_SHUTDOWN)
1702 goto out_unlock;
1703
1704 if (sk->sk_type != SOCK_SEQPACKET) {
1705 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1706 if (err)
1707 goto out_unlock;
1708 }
1709
Rainer Weikusata5527dd2016-02-11 19:37:27 +00001710 /* other == sk && unix_peer(other) != sk if
1711 * - unix_peer(sk) == NULL, destination address bound to sk
1712 * - unix_peer(sk) == sk by time of get but disconnected before lock
1713 */
1714 if (other != sk &&
1715 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
Rainer Weikusat7d267272015-11-20 22:07:23 +00001716 if (timeo) {
1717 timeo = unix_wait_for_peer(other, timeo);
1718
1719 err = sock_intr_errno(timeo);
1720 if (signal_pending(current))
1721 goto out_free;
1722
1723 goto restart;
1724 }
1725
1726 if (!sk_locked) {
1727 unix_state_unlock(other);
1728 unix_state_double_lock(sk, other);
1729 }
1730
1731 if (unix_peer(sk) != other ||
1732 unix_dgram_peer_wake_me(sk, other)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 err = -EAGAIN;
Rainer Weikusat7d267272015-11-20 22:07:23 +00001734 sk_locked = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 goto out_unlock;
1736 }
1737
Rainer Weikusat7d267272015-11-20 22:07:23 +00001738 if (!sk_locked) {
1739 sk_locked = 1;
1740 goto restart_locked;
1741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 }
1743
Rainer Weikusat7d267272015-11-20 22:07:23 +00001744 if (unlikely(sk_locked))
1745 unix_state_unlock(sk);
1746
Alban Crequy3f661162010-10-04 08:48:28 +00001747 if (sock_flag(other, SOCK_RCVTSTAMP))
1748 __net_timestamp(skb);
Eric Dumazet16e57262011-09-19 05:52:27 +00001749 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 skb_queue_tail(&other->sk_receive_queue, skb);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001751 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001752 other->sk_data_ready(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001754 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 return len;
1756
1757out_unlock:
Rainer Weikusat7d267272015-11-20 22:07:23 +00001758 if (sk_locked)
1759 unix_state_unlock(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001760 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761out_free:
1762 kfree_skb(skb);
1763out:
1764 if (other)
1765 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001766 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 return err;
1768}
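/*
 * Illustrative userspace sketch (not part of this file): sendto(2) on a
 * SOCK_DGRAM socket ends up in unix_dgram_sendmsg() above.  A datagram
 * larger than roughly the send buffer is refused with EMSGSIZE, and a
 * full receiver queue either blocks or yields EAGAIN, as handled above.
 * The path "/tmp/example-dgram.sock" is made up and error handling is
 * abbreviated.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_un dst = { .sun_family = AF_UNIX };
 *		int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *		strncpy(dst.sun_path, "/tmp/example-dgram.sock",
 *			sizeof(dst.sun_path) - 1);
 *		if (sendto(fd, "hello", 5, 0,
 *			   (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *			perror("sendto");	// e.g. ENOENT/ECONNREFUSED if nothing is bound there
 *		close(fd);
 *		return 0;
 *	}
 */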
1769
Eric Dumazete370a722013-08-08 14:37:32 -07001770/* We use paged skbs for stream sockets, and limit occupancy to 32768
Tobias Klauserd4e9a402018-02-13 11:11:30 +01001771 * bytes, with a minimum of a full page.
Eric Dumazete370a722013-08-08 14:37:32 -07001772 */
1773#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
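/*
 * Worked example of the limit above (illustrative): with 4 KiB pages,
 * get_order(32768) == 3, so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768
 * bytes; with 64 KiB pages, get_order(32768) == 0 and the value rounds
 * up to one full 65536-byte page, which is the "minimum of a full page"
 * mentioned above.
 */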
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001774
Ying Xue1b784142015-03-02 15:37:48 +08001775static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1776 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 struct sock *sk = sock->sk;
1779 struct sock *other = NULL;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001780 int err, size;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001781 struct sk_buff *skb;
Jianjun Konge27dfce2008-11-01 21:38:31 -07001782 int sent = 0;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001783 struct scm_cookie scm;
Miklos Szeredi8ba69ba2009-09-11 11:31:45 -07001784 bool fds_sent = false;
Eric Dumazete370a722013-08-08 14:37:32 -07001785 int data_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
dann frazier5f23b732008-11-26 15:32:27 -08001787 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001788 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 if (err < 0)
1790 return err;
1791
1792 err = -EOPNOTSUPP;
1793 if (msg->msg_flags&MSG_OOB)
1794 goto out_err;
1795
1796 if (msg->msg_namelen) {
1797 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1798 goto out_err;
1799 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 err = -ENOTCONN;
Benjamin LaHaise830a1e52005-12-13 23:22:32 -08001801 other = unix_peer(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 if (!other)
1803 goto out_err;
1804 }
1805
1806 if (sk->sk_shutdown & SEND_SHUTDOWN)
1807 goto pipe_err;
1808
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001809 while (sent < len) {
Eric Dumazete370a722013-08-08 14:37:32 -07001810 size = len - sent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
1812 /* Keep two messages in the pipe so it schedules better */
Eric Dumazete370a722013-08-08 14:37:32 -07001813 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
Eric Dumazete370a722013-08-08 14:37:32 -07001815 /* allow fallback to order-0 allocations */
1816 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001817
Eric Dumazete370a722013-08-08 14:37:32 -07001818 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001819
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001820 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1821
Eric Dumazete370a722013-08-08 14:37:32 -07001822 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001823 msg->msg_flags & MSG_DONTWAIT, &err,
1824 get_order(UNIX_SKB_FRAGS_SZ));
Eric Dumazete370a722013-08-08 14:37:32 -07001825 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 goto out_err;
1827
David S. Millerf78a5fd2011-09-16 19:34:00 -04001828 /* Only send the fds in the first buffer */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001829 err = unix_scm_to_skb(&scm, skb, !fds_sent);
Eric Dumazet25888e32010-11-25 04:11:39 +00001830 if (err < 0) {
Eric W. Biederman7361c362010-06-13 03:34:33 +00001831 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001832 goto out_err;
Miklos Szeredi62093442008-11-09 15:23:57 +01001833 }
Eric W. Biederman7361c362010-06-13 03:34:33 +00001834 fds_sent = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
Eric Dumazete370a722013-08-08 14:37:32 -07001836 skb_put(skb, size - data_len);
1837 skb->data_len = data_len;
1838 skb->len = size;
Al Viroc0371da2014-11-24 10:42:55 -05001839 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001840 if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001842 goto out_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 }
1844
David S. Miller1c92b4e2007-05-31 13:24:26 -07001845 unix_state_lock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846
1847 if (sock_flag(other, SOCK_DEAD) ||
1848 (other->sk_shutdown & RCV_SHUTDOWN))
1849 goto pipe_err_free;
1850
Eric Dumazet16e57262011-09-19 05:52:27 +00001851 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 skb_queue_tail(&other->sk_receive_queue, skb);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001853 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001854 other->sk_data_ready(other);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001855 sent += size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001858 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
1860 return sent;
1861
1862pipe_err_free:
David S. Miller1c92b4e2007-05-31 13:24:26 -07001863 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 kfree_skb(skb);
1865pipe_err:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001866 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1867 send_sig(SIGPIPE, current, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 err = -EPIPE;
1869out_err:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001870 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 return sent ? : err;
1872}
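/*
 * Illustrative userspace sketch (not part of this file): the pipe_err
 * path above raises SIGPIPE and returns -EPIPE when the peer is gone;
 * MSG_NOSIGNAL suppresses the signal so the caller just sees EPIPE.
 * Error handling is abbreviated.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *
 *		socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *		close(fds[1]);		// peer goes away
 *		if (send(fds[0], "x", 1, MSG_NOSIGNAL) < 0 && errno == EPIPE)
 *			fprintf(stderr, "got EPIPE, no SIGPIPE delivered\n");
 *		close(fds[0]);
 *		return 0;
 *	}
 */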
1873
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001874static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1875 int offset, size_t size, int flags)
1876{
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001877 int err;
1878 bool send_sigpipe = false;
1879 bool init_scm = true;
1880 struct scm_cookie scm;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001881 struct sock *other, *sk = socket->sk;
1882 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1883
1884 if (flags & MSG_OOB)
1885 return -EOPNOTSUPP;
1886
1887 other = unix_peer(sk);
1888 if (!other || sk->sk_state != TCP_ESTABLISHED)
1889 return -ENOTCONN;
1890
1891 if (false) {
1892alloc_skb:
1893 unix_state_unlock(other);
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07001894 mutex_unlock(&unix_sk(other)->iolock);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001895 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1896 &err, 0);
1897 if (!newskb)
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001898 goto err;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001899 }
1900
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07001901 /* we must acquire iolock as we modify already present
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001902 * skbs in the sk_receive_queue and mess with skb->len
1903 */
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07001904 err = mutex_lock_interruptible(&unix_sk(other)->iolock);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001905 if (err) {
1906 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001907 goto err;
1908 }
1909
1910 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1911 err = -EPIPE;
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001912 send_sigpipe = true;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001913 goto err_unlock;
1914 }
1915
1916 unix_state_lock(other);
1917
1918 if (sock_flag(other, SOCK_DEAD) ||
1919 other->sk_shutdown & RCV_SHUTDOWN) {
1920 err = -EPIPE;
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001921 send_sigpipe = true;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001922 goto err_state_unlock;
1923 }
1924
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001925 if (init_scm) {
1926 err = maybe_init_creds(&scm, socket, other);
1927 if (err)
1928 goto err_state_unlock;
1929 init_scm = false;
1930 }
1931
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001932 skb = skb_peek_tail(&other->sk_receive_queue);
1933 if (tail && tail == skb) {
1934 skb = newskb;
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001935 } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
1936 if (newskb) {
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001937 skb = newskb;
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001938 } else {
1939 tail = skb;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001940 goto alloc_skb;
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001941 }
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001942 } else if (newskb) {
1943		/* this is the fast path; we don't necessarily need to
1944		 * call kfree_skb here - even with newskb == NULL
1945		 * this does no harm
1946		 */
1947 consume_skb(newskb);
Hannes Frederic Sowa8844f972015-11-16 16:25:56 +01001948 newskb = NULL;
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001949 }
1950
1951 if (skb_append_pagefrags(skb, page, offset, size)) {
1952 tail = skb;
1953 goto alloc_skb;
1954 }
1955
1956 skb->len += size;
1957 skb->data_len += size;
1958 skb->truesize += size;
Reshetova, Elena14afee42017-06-30 13:08:00 +03001959 refcount_add(size, &sk->sk_wmem_alloc);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001960
Hannes Frederic Sowaa3a116e2015-11-17 15:10:59 +01001961 if (newskb) {
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001962 err = unix_scm_to_skb(&scm, skb, false);
1963 if (err)
1964 goto err_state_unlock;
Hannes Frederic Sowaa3a116e2015-11-17 15:10:59 +01001965 spin_lock(&other->sk_receive_queue.lock);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001966 __skb_queue_tail(&other->sk_receive_queue, newskb);
Hannes Frederic Sowaa3a116e2015-11-17 15:10:59 +01001967 spin_unlock(&other->sk_receive_queue.lock);
1968 }
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001969
1970 unix_state_unlock(other);
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07001971 mutex_unlock(&unix_sk(other)->iolock);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001972
1973 other->sk_data_ready(other);
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001974 scm_destroy(&scm);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001975 return size;
1976
1977err_state_unlock:
1978 unix_state_unlock(other);
1979err_unlock:
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07001980 mutex_unlock(&unix_sk(other)->iolock);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001981err:
1982 kfree_skb(newskb);
1983 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1984 send_sig(SIGPIPE, current, 0);
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01001985 if (!init_scm)
1986 scm_destroy(&scm);
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001987 return err;
1988}
1989
Ying Xue1b784142015-03-02 15:37:48 +08001990static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1991 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992{
1993 int err;
1994 struct sock *sk = sock->sk;
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 err = sock_error(sk);
1997 if (err)
1998 return err;
1999
2000 if (sk->sk_state != TCP_ESTABLISHED)
2001 return -ENOTCONN;
2002
2003 if (msg->msg_namelen)
2004 msg->msg_namelen = 0;
2005
Ying Xue1b784142015-03-02 15:37:48 +08002006 return unix_dgram_sendmsg(sock, msg, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007}
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002008
Ying Xue1b784142015-03-02 15:37:48 +08002009static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2010 size_t size, int flags)
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00002011{
2012 struct sock *sk = sock->sk;
2013
2014 if (sk->sk_state != TCP_ESTABLISHED)
2015 return -ENOTCONN;
2016
Ying Xue1b784142015-03-02 15:37:48 +08002017 return unix_dgram_recvmsg(sock, msg, size, flags);
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00002018}
2019
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2021{
Al Viroae3b5642019-02-15 20:09:35 +00002022 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
Al Viroae3b5642019-02-15 20:09:35 +00002024 if (addr) {
2025 msg->msg_namelen = addr->len;
2026 memcpy(msg->msg_name, addr->name, addr->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 }
2028}
2029
Ying Xue1b784142015-03-02 15:37:48 +08002030static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2031 size_t size, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002033 struct scm_cookie scm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 struct sock *sk = sock->sk;
2035 struct unix_sock *u = unix_sk(sk);
Rainer Weikusat64874282015-12-06 21:11:38 +00002036 struct sk_buff *skb, *last;
2037 long timeo;
Paolo Abenifd69c392019-04-08 10:15:59 +02002038 int skip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 int err;
2040
2041 err = -EOPNOTSUPP;
2042 if (flags&MSG_OOB)
2043 goto out;
2044
Rainer Weikusat64874282015-12-06 21:11:38 +00002045 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Rainer Weikusat64874282015-12-06 21:11:38 +00002047 do {
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002048 mutex_lock(&u->iolock);
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00002049
Rainer Weikusat64874282015-12-06 21:11:38 +00002050 skip = sk_peek_offset(sk, flags);
Paolo Abenifd69c392019-04-08 10:15:59 +02002051 skb = __skb_try_recv_datagram(sk, flags, NULL, &skip, &err,
2052 &last);
Rainer Weikusat64874282015-12-06 21:11:38 +00002053 if (skb)
2054 break;
2055
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002056 mutex_unlock(&u->iolock);
Rainer Weikusat64874282015-12-06 21:11:38 +00002057
2058 if (err != -EAGAIN)
2059 break;
2060 } while (timeo &&
2061 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
2062
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002063 if (!skb) { /* implies iolock unlocked */
Florian Zumbiehl0a112252007-11-29 23:19:23 +11002064 unix_state_lock(sk);
2065 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2066 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2067 (sk->sk_shutdown & RCV_SHUTDOWN))
2068 err = 0;
2069 unix_state_unlock(sk);
Rainer Weikusat64874282015-12-06 21:11:38 +00002070 goto out;
Florian Zumbiehl0a112252007-11-29 23:19:23 +11002071 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
Rainer Weikusat77b75f42015-11-26 19:23:15 +00002073 if (wq_has_sleeper(&u->peer_wait))
2074 wake_up_interruptible_sync_poll(&u->peer_wait,
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002075 EPOLLOUT | EPOLLWRNORM |
2076 EPOLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
2078 if (msg->msg_name)
2079 unix_copy_addr(msg, skb->sk);
2080
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00002081 if (size > skb->len - skip)
2082 size = skb->len - skip;
2083 else if (size < skb->len - skip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 msg->msg_flags |= MSG_TRUNC;
2085
David S. Miller51f3d022014-11-05 16:46:40 -05002086 err = skb_copy_datagram_msg(skb, skip, msg, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 if (err)
2088 goto out_free;
2089
Alban Crequy3f661162010-10-04 08:48:28 +00002090 if (sock_flag(sk, SOCK_RCVTSTAMP))
2091 __sock_recv_timestamp(msg, sk, skb);
2092
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002093 memset(&scm, 0, sizeof(scm));
2094
2095 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2096 unix_set_secdata(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002098 if (!(flags & MSG_PEEK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002100 unix_detach_fds(&scm, skb);
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00002101
2102 sk_peek_offset_bwd(sk, skb->len);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002103 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 /* It is questionable: on PEEK we could:
2105 - do not return fds - good, but too simple 8)
2106 - return fds, and do not return them on read (old strategy,
2107 apparently wrong)
2108 - clone fds (I chose it for now, it is the most universal
2109 solution)
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002110
2111 POSIX 1003.1g does not actually define this clearly
2112 at all. POSIX 1003.1g doesn't define a lot of things
2113 clearly however!
2114
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 */
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00002116
2117 sk_peek_offset_fwd(sk, size);
2118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002120 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 }
Eric Dumazet9f6f9af2012-02-21 23:24:55 +00002122 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002124 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
2126out_free:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002127 skb_free_datagram(sk, skb);
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002128 mutex_unlock(&u->iolock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129out:
2130 return err;
2131}
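/*
 * Illustrative userspace sketch (not part of this file): in
 * unix_dgram_recvmsg() above, MSG_PEEK leaves the datagram queued and
 * MSG_TRUNC makes the return value report the full datagram length even
 * when the buffer is smaller.  Error handling is abbreviated.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		char tiny[4];
 *		ssize_t n;
 *
 *		socketpair(AF_UNIX, SOCK_DGRAM, 0, fds);
 *		send(fds[0], "hello world", 11, 0);
 *		// Reports 11 even though only 4 bytes fit in the buffer,
 *		// and the datagram stays queued because of MSG_PEEK.
 *		n = recv(fds[1], tiny, sizeof(tiny), MSG_PEEK | MSG_TRUNC);
 *		printf("full length %zd\n", n);
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */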
2132
2133/*
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002134 * Sleep until more data has arrived. But check for races.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 */
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002136static long unix_stream_data_wait(struct sock *sk, long timeo,
WANG Cong06a77b02016-11-17 15:55:26 -08002137 struct sk_buff *last, unsigned int last_len,
2138 bool freezable)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139{
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002140 struct sk_buff *tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 DEFINE_WAIT(wait);
2142
David S. Miller1c92b4e2007-05-31 13:24:26 -07002143 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
2145 for (;;) {
Eric Dumazetaa395142010-04-20 13:03:51 +00002146 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002148 tail = skb_peek_tail(&sk->sk_receive_queue);
2149 if (tail != last ||
2150 (tail && tail->len != last_len) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 sk->sk_err ||
2152 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2153 signal_pending(current) ||
2154 !timeo)
2155 break;
2156
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002157 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002158 unix_state_unlock(sk);
WANG Cong06a77b02016-11-17 15:55:26 -08002159 if (freezable)
2160 timeo = freezable_schedule_timeout(timeo);
2161 else
2162 timeo = schedule_timeout(timeo);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002163 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002164
2165 if (sock_flag(sk, SOCK_DEAD))
2166 break;
2167
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002168 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 }
2170
Eric Dumazetaa395142010-04-20 13:03:51 +00002171 finish_wait(sk_sleep(sk), &wait);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002172 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 return timeo;
2174}
2175
Eric Dumazete370a722013-08-08 14:37:32 -07002176static unsigned int unix_skb_len(const struct sk_buff *skb)
2177{
2178 return skb->len - UNIXCB(skb).consumed;
2179}
2180
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002181struct unix_stream_read_state {
2182 int (*recv_actor)(struct sk_buff *, int, int,
2183 struct unix_stream_read_state *);
2184 struct socket *socket;
2185 struct msghdr *msg;
2186 struct pipe_inode_info *pipe;
2187 size_t size;
2188 int flags;
2189 unsigned int splice_flags;
2190};
2191
WANG Cong06a77b02016-11-17 15:55:26 -08002192static int unix_stream_read_generic(struct unix_stream_read_state *state,
2193 bool freezable)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002195 struct scm_cookie scm;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002196 struct socket *sock = state->socket;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 struct sock *sk = sock->sk;
2198 struct unix_sock *u = unix_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 int copied = 0;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002200 int flags = state->flags;
Eric Dumazetde144392014-03-25 18:42:27 -07002201 int noblock = flags & MSG_DONTWAIT;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002202 bool check_creds = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 int target;
2204 int err = 0;
2205 long timeo;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002206 int skip;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002207 size_t size = state->size;
2208 unsigned int last_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Rainer Weikusat1b92ee32016-02-08 18:47:19 +00002210 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2211 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 goto out;
Rainer Weikusat1b92ee32016-02-08 18:47:19 +00002213 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
Rainer Weikusat1b92ee32016-02-08 18:47:19 +00002215 if (unlikely(flags & MSG_OOB)) {
2216 err = -EOPNOTSUPP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 goto out;
Rainer Weikusat1b92ee32016-02-08 18:47:19 +00002218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002220 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
Eric Dumazetde144392014-03-25 18:42:27 -07002221 timeo = sock_rcvtimeo(sk, noblock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002223 memset(&scm, 0, sizeof(scm));
2224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 /* Lock the socket to prevent queue disordering
2226	 * while we sleep copying data to the message
2227 */
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002228 mutex_lock(&u->iolock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
Matthew Dawsona0917e02017-08-18 15:04:54 -04002230 skip = max(sk_peek_offset(sk, flags), 0);
Andrey Vagine9193d62015-10-02 00:05:36 +03002231
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002232 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 int chunk;
Hannes Frederic Sowa73ed5d22015-11-10 16:23:15 +01002234 bool drop_skb;
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002235 struct sk_buff *skb, *last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
Rainer Weikusat18eceb82016-02-18 12:39:46 +00002237redo:
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002238 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002239 if (sock_flag(sk, SOCK_DEAD)) {
2240 err = -ECONNRESET;
2241 goto unlock;
2242 }
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002243 last = skb = skb_peek(&sk->sk_receive_queue);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002244 last_len = last ? last->len : 0;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002245again:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002246 if (skb == NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 if (copied >= target)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002248 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
2250 /*
2251 * POSIX 1003.1g mandates this order.
2252 */
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002253
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002254 err = sock_error(sk);
2255 if (err)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002256 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 if (sk->sk_shutdown & RCV_SHUTDOWN)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002258 goto unlock;
2259
2260 unix_state_unlock(sk);
Rainer Weikusat1b92ee32016-02-08 18:47:19 +00002261 if (!timeo) {
2262 err = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 break;
Rainer Weikusat1b92ee32016-02-08 18:47:19 +00002264 }
2265
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002266 mutex_unlock(&u->iolock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002268 timeo = unix_stream_data_wait(sk, timeo, last,
WANG Cong06a77b02016-11-17 15:55:26 -08002269 last_len, freezable);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
Rainer Weikusat3822b5c2015-12-16 20:09:25 +00002271 if (signal_pending(current)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 err = sock_intr_errno(timeo);
Eric Dumazetfa0dc042016-01-24 13:53:50 -08002273 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 goto out;
2275 }
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00002276
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002277 mutex_lock(&u->iolock);
Rainer Weikusat18eceb82016-02-18 12:39:46 +00002278 goto redo;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002279unlock:
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002280 unix_state_unlock(sk);
2281 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 }
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002283
Eric Dumazete370a722013-08-08 14:37:32 -07002284 while (skip >= unix_skb_len(skb)) {
2285 skip -= unix_skb_len(skb);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002286 last = skb;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002287 last_len = skb->len;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002288 skb = skb_peek_next(skb, &sk->sk_receive_queue);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002289 if (!skb)
2290 goto again;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002291 }
2292
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002293 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
2295 if (check_creds) {
2296 /* Never glue messages from different writers */
Hannes Frederic Sowa9490f882015-11-26 12:08:18 +01002297 if (!unix_skb_scm_eq(skb, &scm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 break;
Eric W. Biederman0e82e7f6d2013-04-03 16:14:47 +00002299 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 /* Copy credentials */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002301 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
Stephen Smalley37a9a8d2015-06-10 08:44:59 -04002302 unix_set_secdata(&scm, skb);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002303 check_creds = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 }
2305
2306 /* Copy address just once */
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002307 if (state->msg && state->msg->msg_name) {
2308 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2309 state->msg->msg_name);
2310 unix_copy_addr(state->msg, skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 sunaddr = NULL;
2312 }
2313
Eric Dumazete370a722013-08-08 14:37:32 -07002314 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
Hannes Frederic Sowa73ed5d22015-11-10 16:23:15 +01002315 skb_get(skb);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002316 chunk = state->recv_actor(skb, skip, chunk, state);
Hannes Frederic Sowa73ed5d22015-11-10 16:23:15 +01002317 drop_skb = !unix_skb_len(skb);
2318 /* skb is only safe to use if !drop_skb */
2319 consume_skb(skb);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002320 if (chunk < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 if (copied == 0)
2322 copied = -EFAULT;
2323 break;
2324 }
2325 copied += chunk;
2326 size -= chunk;
2327
Hannes Frederic Sowa73ed5d22015-11-10 16:23:15 +01002328 if (drop_skb) {
2329 /* the skb was touched by a concurrent reader;
2330 * we should not expect anything from this skb
2331 * anymore and assume it invalid - we can be
2332 * sure it was dropped from the socket queue
2333 *
2334 * let's report a short read
2335 */
2336 err = 0;
2337 break;
2338 }
2339
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 /* Mark read part of skb as used */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002341 if (!(flags & MSG_PEEK)) {
Eric Dumazete370a722013-08-08 14:37:32 -07002342 UNIXCB(skb).consumed += chunk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002344 sk_peek_offset_bwd(sk, chunk);
2345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002347 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348
Eric Dumazete370a722013-08-08 14:37:32 -07002349 if (unix_skb_len(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
Eric Dumazet6f01fd62012-01-28 16:11:03 +00002352 skb_unlink(skb, &sk->sk_receive_queue);
Neil Horman70d4bf62010-07-20 06:45:56 +00002353 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002355 if (scm.fp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002357 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 /* It is questionable, see note in unix_dgram_recvmsg.
2359 */
2360 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002361 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
Andrey Vagine9193d62015-10-02 00:05:36 +03002363 sk_peek_offset_fwd(sk, chunk);
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002364
Aaron Conole9f389e32015-09-26 18:50:43 -04002365 if (UNIXCB(skb).fp)
2366 break;
2367
Andrey Vagine9193d62015-10-02 00:05:36 +03002368 skip = 0;
Aaron Conole9f389e32015-09-26 18:50:43 -04002369 last = skb;
2370 last_len = skb->len;
2371 unix_state_lock(sk);
2372 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2373 if (skb)
2374 goto again;
2375 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 break;
2377 }
2378 } while (size);
2379
Linus Torvalds6e1ce3c2016-09-01 14:43:53 -07002380 mutex_unlock(&u->iolock);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002381 if (state->msg)
2382 scm_recv(sock, state->msg, &scm, flags);
2383 else
2384 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385out:
2386 return copied ? : err;
2387}
2388
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002389static int unix_stream_read_actor(struct sk_buff *skb,
2390 int skip, int chunk,
2391 struct unix_stream_read_state *state)
2392{
2393 int ret;
2394
2395 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2396 state->msg, chunk);
2397 return ret ?: chunk;
2398}
2399
2400static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2401 size_t size, int flags)
2402{
2403 struct unix_stream_read_state state = {
2404 .recv_actor = unix_stream_read_actor,
2405 .socket = sock,
2406 .msg = msg,
2407 .size = size,
2408 .flags = flags
2409 };
2410
WANG Cong06a77b02016-11-17 15:55:26 -08002411 return unix_stream_read_generic(&state, true);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002412}
2413
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002414static int unix_stream_splice_actor(struct sk_buff *skb,
2415 int skip, int chunk,
2416 struct unix_stream_read_state *state)
2417{
2418 return skb_splice_bits(skb, state->socket->sk,
2419 UNIXCB(skb).consumed + skip,
Al Viro25869262016-09-17 21:02:10 -04002420 state->pipe, chunk, state->splice_flags);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002421}
2422
2423static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2424 struct pipe_inode_info *pipe,
2425 size_t size, unsigned int flags)
2426{
2427 struct unix_stream_read_state state = {
2428 .recv_actor = unix_stream_splice_actor,
2429 .socket = sock,
2430 .pipe = pipe,
2431 .size = size,
2432 .splice_flags = flags,
2433 };
2434
2435 if (unlikely(*ppos))
2436 return -ESPIPE;
2437
2438 if (sock->file->f_flags & O_NONBLOCK ||
2439 flags & SPLICE_F_NONBLOCK)
2440 state.flags = MSG_DONTWAIT;
2441
WANG Cong06a77b02016-11-17 15:55:26 -08002442 return unix_stream_read_generic(&state, false);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002443}
2444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445static int unix_shutdown(struct socket *sock, int mode)
2446{
2447 struct sock *sk = sock->sk;
2448 struct sock *other;
2449
Xi Wangfc61b922012-08-26 16:47:13 +00002450 if (mode < SHUT_RD || mode > SHUT_RDWR)
2451 return -EINVAL;
2452 /* This maps:
2453 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2454 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2455 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2456 */
2457 ++mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
Alban Crequy7180a032011-01-19 04:56:36 +00002459 unix_state_lock(sk);
2460 sk->sk_shutdown |= mode;
2461 other = unix_peer(sk);
2462 if (other)
2463 sock_hold(other);
2464 unix_state_unlock(sk);
2465 sk->sk_state_change(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Alban Crequy7180a032011-01-19 04:56:36 +00002467 if (other &&
2468 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
Alban Crequy7180a032011-01-19 04:56:36 +00002470 int peer_mode = 0;
2471
2472 if (mode&RCV_SHUTDOWN)
2473 peer_mode |= SEND_SHUTDOWN;
2474 if (mode&SEND_SHUTDOWN)
2475 peer_mode |= RCV_SHUTDOWN;
2476 unix_state_lock(other);
2477 other->sk_shutdown |= peer_mode;
2478 unix_state_unlock(other);
2479 other->sk_state_change(other);
2480 if (peer_mode == SHUTDOWN_MASK)
2481 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2482 else if (peer_mode & RCV_SHUTDOWN)
2483 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 }
Alban Crequy7180a032011-01-19 04:56:36 +00002485 if (other)
2486 sock_put(other);
2487
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 return 0;
2489}
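/*
 * Illustrative userspace sketch (not part of this file): as implemented
 * above, SHUT_WR on one end also sets RCV_SHUTDOWN on the peer, so the
 * peer's read() returns 0 (end of file) instead of blocking.  Error
 * handling is abbreviated.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		char c;
 *
 *		socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *		shutdown(fds[0], SHUT_WR);
 *		printf("read returned %zd\n", read(fds[1], &c, 1));	// 0 = EOF
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */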
2490
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002491long unix_inq_len(struct sock *sk)
2492{
2493 struct sk_buff *skb;
2494 long amount = 0;
2495
2496 if (sk->sk_state == TCP_LISTEN)
2497 return -EINVAL;
2498
2499 spin_lock(&sk->sk_receive_queue.lock);
2500 if (sk->sk_type == SOCK_STREAM ||
2501 sk->sk_type == SOCK_SEQPACKET) {
2502 skb_queue_walk(&sk->sk_receive_queue, skb)
Eric Dumazete370a722013-08-08 14:37:32 -07002503 amount += unix_skb_len(skb);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002504 } else {
2505 skb = skb_peek(&sk->sk_receive_queue);
2506 if (skb)
2507 amount = skb->len;
2508 }
2509 spin_unlock(&sk->sk_receive_queue.lock);
2510
2511 return amount;
2512}
2513EXPORT_SYMBOL_GPL(unix_inq_len);
2514
2515long unix_outq_len(struct sock *sk)
2516{
2517 return sk_wmem_alloc_get(sk);
2518}
2519EXPORT_SYMBOL_GPL(unix_outq_len);
2520
Andrey Vaginba94f302017-02-01 11:00:45 -08002521static int unix_open_file(struct sock *sk)
2522{
2523 struct path path;
2524 struct file *f;
2525 int fd;
2526
2527 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2528 return -EPERM;
2529
Al Viroae3b5642019-02-15 20:09:35 +00002530 if (!smp_load_acquire(&unix_sk(sk)->addr))
Andrey Vaginba94f302017-02-01 11:00:45 -08002531 return -ENOENT;
Al Viroae3b5642019-02-15 20:09:35 +00002532
2533 path = unix_sk(sk)->path;
2534 if (!path.dentry)
2535 return -ENOENT;
Andrey Vaginba94f302017-02-01 11:00:45 -08002536
2537 path_get(&path);
Andrey Vaginba94f302017-02-01 11:00:45 -08002538
2539 fd = get_unused_fd_flags(O_CLOEXEC);
2540 if (fd < 0)
2541 goto out;
2542
2543 f = dentry_open(&path, O_PATH, current_cred());
2544 if (IS_ERR(f)) {
2545 put_unused_fd(fd);
2546 fd = PTR_ERR(f);
2547 goto out;
2548 }
2549
2550 fd_install(fd, f);
2551out:
2552 path_put(&path);
2553
2554 return fd;
2555}
2556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2558{
2559 struct sock *sk = sock->sk;
Jianjun Konge27dfce2008-11-01 21:38:31 -07002560 long amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 int err;
2562
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002563 switch (cmd) {
2564 case SIOCOUTQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002565 amount = unix_outq_len(sk);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002566 err = put_user(amount, (int __user *)arg);
2567 break;
2568 case SIOCINQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002569 amount = unix_inq_len(sk);
2570 if (amount < 0)
2571 err = amount;
2572 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 err = put_user(amount, (int __user *)arg);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002574 break;
Andrey Vaginba94f302017-02-01 11:00:45 -08002575 case SIOCUNIXFILE:
2576 err = unix_open_file(sk);
2577 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002578 default:
2579 err = -ENOIOCTLCMD;
2580 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 }
2582 return err;
2583}
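/*
 * Illustrative userspace sketch (not part of this file): SIOCINQ and
 * SIOCOUTQ map to unix_inq_len()/unix_outq_len() via unix_ioctl()
 * above.  The ioctl names come from the Linux-specific
 * <linux/sockios.h>; error handling is abbreviated.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/sockios.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2], inq = 0, outq = 0;
 *
 *		socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *		send(fds[0], "abc", 3, 0);
 *		ioctl(fds[1], SIOCINQ, &inq);	// bytes waiting to be read
 *		ioctl(fds[0], SIOCOUTQ, &outq);	// bytes not yet consumed by the peer
 *		printf("inq=%d outq=%d\n", inq, outq);
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */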

static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= EPOLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}
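
/*
 * Illustrative only (not part of this file): a minimal userspace sketch,
 * assuming a connected SOCK_STREAM AF_UNIX socket in `fd`, of how the mask
 * built above is observed.  After the peer shuts down its sending side,
 * poll() reports POLLRDHUP | POLLIN here, and POLLHUP once both directions
 * are shut down.
 *
 *	#define _GNU_SOURCE
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLRDHUP))
 *		;	/* peer closed its write side; drain, then close */
 */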

static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
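
/*
 * Note on the writability test above: a datagram sender whose peer is not
 * connected back to it is reported unwritable while that peer's receive
 * queue is full.  unix_dgram_peer_wake_me() also hooks the sender onto the
 * peer's wake-up relay, so an EPOLLOUT edge is generated once the receiver
 * drains its queue instead of the sender stalling indefinitely.
 */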

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
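
/*
 * Worked example (assuming 64-bit BITS_PER_LONG and UNIX_HASH_BITS == 8, so
 * BUCKET_SPACE == 54): the seq_file position packs the hash-bucket index into
 * the top bits and a 1-based offset within that bucket into the low 54 bits;
 * set_bucket_offset(3, 2) == (3UL << 54) | 2, which get_bucket() and
 * get_offset() split back into "bucket 3, second matching socket".
 */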
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002685
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002686static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687{
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002688 unsigned long offset = get_offset(*pos);
2689 unsigned long bucket = get_bucket(*pos);
2690 struct sock *sk;
2691 unsigned long count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002693 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2694 if (sock_net(sk) != seq_file_net(seq))
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002695 continue;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002696 if (++count == offset)
2697 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 }
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002699
2700 return sk;
2701}
2702
2703static struct sock *unix_next_socket(struct seq_file *seq,
2704 struct sock *sk,
2705 loff_t *pos)
2706{
2707 unsigned long bucket;
2708
2709 while (sk > (struct sock *)SEQ_START_TOKEN) {
2710 sk = sk_next(sk);
2711 if (!sk)
2712 goto next_bucket;
2713 if (sock_net(sk) == seq_file_net(seq))
2714 return sk;
2715 }
2716
2717 do {
2718 sk = unix_from_bucket(seq, pos);
2719 if (sk)
2720 return sk;
2721
2722next_bucket:
2723 bucket = get_bucket(*pos) + 1;
2724 *pos = set_bucket_offset(bucket, 1);
2725 } while (bucket < ARRAY_SIZE(unix_socket_table));
2726
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 return NULL;
2728}
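
/*
 * The two helpers above drive the /proc/net/unix iterator: they walk
 * unix_socket_table bucket by bucket, skip sockets that belong to other
 * network namespaces, and re-encode the resume point into *pos with
 * set_bucket_offset() whenever a bucket is exhausted.
 */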

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	// under unix_table_lock here
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
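
/*
 * Illustrative only: a /proc/net/unix line emitted by unix_seq_show() might
 * read as below (field values made up; %pK usually prints a hashed or zeroed
 * pointer).  Abstract addresses get a leading '@', and NUL bytes inside the
 * name are also rendered as '@'.
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	0000000000000000: 00000002 00000000 00010000 0001 01 23456 /run/example.sock
 */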

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};
#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private))) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}
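
/*
 * Note: each network namespace starts with sysctl_max_dgram_qlen == 10, the
 * cap on how many datagrams may sit unread in a SOCK_DGRAM receive queue;
 * when the sysctl registration above succeeds it is tunable via
 * /proc/sys/net/unix/max_dgram_qlen.
 */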

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);