// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in the
 * user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

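/*
 * Illustrative sketch (not part of the original file): a protocol's
 * setsockopt() handler would typically gate a privileged option on one
 * of the helpers above, e.g.:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * Requiring that both the socket opener held the capability at creation
 * time and the current task holds it now prevents privilege laundering
 * through file descriptors passed to less privileged processes.
 */
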
/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x) \
  x "AF_UNSPEC", x "AF_UNIX"     , x "AF_INET"     , \
  x "AF_AX25"  , x "AF_IPX"      , x "AF_APPLETALK", \
  x "AF_NETROM", x "AF_BRIDGE"   , x "AF_ATMPVC"   , \
  x "AF_X25"   , x "AF_INET6"    , x "AF_ROSE"     , \
  x "AF_DECnet", x "AF_NETBEUI"  , x "AF_SECURITY" , \
  x "AF_KEY"   , x "AF_NETLINK"  , x "AF_PACKET"   , \
  x "AF_ASH"   , x "AF_ECONET"   , x "AF_ATMSVC"   , \
  x "AF_RDS"   , x "AF_SNA"      , x "AF_IRDA"     , \
  x "AF_PPPOX" , x "AF_WANPIPE"  , x "AF_LLC"      , \
  x "27"       , x "28"          , x "AF_CAN"      , \
  x "AF_TIPC"  , x "AF_BLUETOOTH", x "IUCV"        , \
  x "AF_RXRPC" , x "AF_ISDN"     , x "AF_PHONET"   , \
  x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG"      , \
  x "AF_NFC"   , x "AF_VSOCK"    , x "AF_KCM"      , \
  x "AF_QIPCRTR", x "AF_SMC"     , x "AF_XDP"      , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};
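
/*
 * Illustrative note (not part of the original file): the strings built
 * above are the class names lockdep prints in its reports; a TCP
 * socket's lock shows up as "sk_lock-AF_INET" and its spinlock as
 * "slock-AF_INET", while kernel-internal sockets get the "k-" prefixed
 * classes so they are tracked as separate lock classes.
 */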

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

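/*
 * Illustrative sketch (not in the original file): an in-kernel user such
 * as a network block device backing swap could pair the helpers above
 * around the lifetime of its transport socket ("conn" is hypothetical):
 *
 *	sk_set_memalloc(conn->sock->sk);
 *	...
 *	sk_clear_memalloc(conn->sock->sk);
 *
 * While SOCK_MEMALLOC is set, allocations for the socket carry
 * __GFP_MEMALLOC and its receive path may ignore normal rmem limits,
 * so writeback to network-backed swap can always make forward progress.
 */
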
static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		return sizeof(old_tv);
	}

	*(struct __kernel_sock_timeval *)optval = tv;
	return sizeof(tv);
}

static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
			    bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv.tv_sec = tv32.tv_sec;
		tv.tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv.tv_sec = old_tv.tv_sec;
		tv.tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(tv))
			return -EINVAL;
		if (copy_from_sockptr(&tv, optval, sizeof(tv)))
			return -EFAULT;
	}
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}

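/*
 * For reference (illustrative, not part of the original file), the
 * userspace view of the conversion above: a timeval handed to
 * setsockopt() is rounded up to whole jiffies, and {0, 0} means
 * "block forever" (MAX_SCHEDULE_TIMEOUT):
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *	// a blocking recv() on fd now fails with EAGAIN/EWOULDBLOCK
 *	// once ~2.5s pass without data
 */
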
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the RCU protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

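/*
 * Illustrative sketch (not in the original file): a datagram protocol's
 * delivery path typically ends in the helper above; a negative return
 * (-ENOMEM or -ENOBUFS) means the skb was counted as a drop and still
 * belongs to the caller:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */
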
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

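/*
 * Illustrative note (not in the original file): __sk_receive_skb() above
 * is the classic socket-backlog pattern. In softirq context, if a
 * process currently owns the socket lock, the skb is parked on
 * sk->sk_backlog instead of being processed; release_sock() later
 * replays the backlog through sk_backlog_rcv() in process context, so
 * the protocol's receive handler always runs under the socket lock.
 */
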
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	sk->sk_bound_dev_if = ifindex;
	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
{
	int ret;

	if (lock_sk)
		lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, ifindex);
	if (lock_sk)
		release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(sock_bindtoindex);

static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_sockptr(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	return sock_bindtoindex(sk, index, true);
out:
#endif

	return ret;
}

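/*
 * For reference (illustrative, not part of the original file), the
 * userspace side of the helper above:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * An empty name (or zero option length) unbinds the socket, and
 * CAP_NET_RAW is only required to change an existing device binding.
 */
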
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON_ONCE(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

void sock_set_reuseaddr(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuse = SK_CAN_REUSE;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseaddr);

void sock_set_reuseport(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuseport = true;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseport);

void sock_no_linger(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_lingertime = 0;
	sock_set_flag(sk, SOCK_LINGER);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_no_linger);

void sock_set_priority(struct sock *sk, u32 priority)
{
	lock_sock(sk);
	sk->sk_priority = priority;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_priority);

void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
	lock_sock(sk);
	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
		sk->sk_sndtimeo = secs * HZ;
	else
		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);

static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{
	if (val) {
		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
		sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
		sock_set_flag(sk, SOCK_RCVTSTAMP);
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	} else {
		sock_reset_flag(sk, SOCK_RCVTSTAMP);
		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		sock_reset_flag(sk, SOCK_TSTAMP_NEW);
	}
}

void sock_enable_timestamps(struct sock *sk)
{
	lock_sock(sk);
	__sock_set_timestamps(sk, true, false, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_enable_timestamps);

void sock_set_keepalive(struct sock *sk)
{
	lock_sock(sk);
	if (sk->sk_prot->keepalive)
		sk->sk_prot->keepalive(sk, true);
	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_keepalive);

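/*
 * Illustrative sketch (not in the original file): the lock_sock()-based
 * helpers above exist so in-kernel socket users can set common options
 * without a fake userspace setsockopt() call; a storage or cluster
 * client might do ("sock" is a hypothetical struct socket *):
 *
 *	sock_set_reuseaddr(sock->sk);
 *	sock_set_keepalive(sock->sk);
 *	sock_set_sndtimeo(sock->sk, 5);	// 5 second send timeout
 */
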
static void __sock_set_rcvbuf(struct sock *sk, int val)
{
	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
	 * as a negative value.
	 */
	val = min_t(int, val, INT_MAX / 2);
	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;

	/* We double it on the way in to account for "struct sk_buff" etc.
	 * overhead. Applications assume that the SO_RCVBUF setting they make
	 * will allow that much actual data to be received on that socket.
	 *
	 * Applications are unaware that "struct sk_buff" and other overheads
	 * allocate from the receive buffer during socket buffer allocation.
	 *
	 * And after considering the possible alternatives, returning the value
	 * we actually used in getsockopt is the most desirable behavior.
	 */
	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
}

void sock_set_rcvbuf(struct sock *sk, int val)
{
	lock_sock(sk);
	__sock_set_rcvbuf(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_rcvbuf);

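/*
 * For reference (illustrative, not part of the original file): because
 * of the doubling above, userspace reads back twice what it wrote:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out == 131072, assuming 65536 <= net.core.rmem_max
 */
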
void sock_set_mark(struct sock *sk, u32 val)
{
	lock_sock(sk);
	sk->sk_mark = val;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_mark);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    sockptr_t optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		WRITE_ONCE(sk->sk_sndbuf,
			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		__sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		__sock_set_rcvbuf(sk, max(val, 0));
		break;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;
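
	/*
	 * For reference (illustrative, not part of the original file), the
	 * userspace shape of the option handled above:
	 *
	 *	struct linger ling = { .l_onoff = 1, .l_linger = 10 };
	 *
	 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
	 *	// close() may now block up to 10s while unsent data drains
	 */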

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP_OLD:
		__sock_set_timestamps(sk, valbool, false, false);
		break;
	case SO_TIMESTAMP_NEW:
		__sock_set_timestamps(sk, valbool, true, false);
		break;
	case SO_TIMESTAMPNS_OLD:
		__sock_set_timestamps(sk, valbool, false, true);
		break;
	case SO_TIMESTAMPNS_NEW:
		__sock_set_timestamps(sk, valbool, true, true);
		break;
	case SO_TIMESTAMPING_NEW:
		sock_set_flag(sk, SOCK_TSTAMP_NEW);
		fallthrough;
	case SO_TIMESTAMPING_OLD:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else {
			if (optname == SO_TIMESTAMPING_NEW)
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		}
		break;
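
	/*
	 * For reference (illustrative, not part of the original file), a
	 * typical SO_TIMESTAMPING request combines generation and report
	 * flags from the mask validated above:
	 *
	 *	unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
	 *			   SOF_TIMESTAMPING_SOFTWARE |
	 *			   SOF_TIMESTAMPING_OPT_ID;
	 *
	 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
	 *	// tx timestamps are then read from the error queue via
	 *	// recvmsg(fd, &msg, MSG_ERRQUEUE), matched by sk_tskey
	 */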

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
				       optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
				       optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_reuseport_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_REUSEPORT_BPF:
		ret = reuseport_detach_prog(sk);
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (val != sk->sk_mark) {
			sk->sk_mark = val;
			sk_dst_reset(sk);
		}
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
	{
		unsigned long ulval = (val == ~0U) ? ~0UL : val;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
			ret = -EFAULT;
			break;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = ulval;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
		break;
	}
	case SO_INCOMING_CPU:
		WRITE_ONCE(sk->sk_incoming_cpu, val);
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!((sk->sk_type == SOCK_STREAM &&
			       sk->sk_protocol == IPPROTO_TCP) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
			break;
		} else if (copy_from_sockptr(&sk_txtime, optval,
					     sizeof(struct sock_txtime))) {
			ret = -EFAULT;
			break;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
			break;
		}
		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
		 * scheduler has enough safeguards.
		 */
		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		sock_valbool_flag(sk, SOCK_TXTIME, true);
		sk->sk_clockid = sk_txtime.clockid;
		sk->sk_txtime_deadline_mode =
			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
		sk->sk_txtime_report_errors =
			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_bindtoindex_locked(sk, val);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

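/*
 * Illustrative note (not in the original file): the two helpers above
 * back the SO_PEERCRED and SO_PEERGROUPS getsockopt() paths; kuid_t and
 * kgid_t values are translated ("munged") into the caller's current
 * user namespace, so a process in a container sees ids in its own
 * mapping.
 */
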
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
		struct sock_txtime txtime;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
Tom Herbert28448b82014-05-23 08:47:19 -07001376 v.val = sk->sk_no_check_tx;
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001377 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001379 case SO_PRIORITY:
1380 v.val = sk->sk_priority;
1381 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001382
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001383 case SO_LINGER:
1384 lv = sizeof(v.ling);
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001385 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001386 v.ling.l_linger = sk->sk_lingertime / HZ;
1387 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001388
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001389 case SO_BSDCOMPAT:
1390 sock_warn_obsolete_bsdism("getsockopt");
1391 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
Deepa Dinamani7f1bc6e2019-02-02 07:34:46 -08001393 case SO_TIMESTAMP_OLD:
Eric Dumazet92f37fd2007-03-25 22:14:49 -07001394 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
Deepa Dinamani887feae2019-02-02 07:34:50 -08001395 !sock_flag(sk, SOCK_TSTAMP_NEW) &&
Eric Dumazet92f37fd2007-03-25 22:14:49 -07001396 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1397 break;
1398
Deepa Dinamani7f1bc6e2019-02-02 07:34:46 -08001399 case SO_TIMESTAMPNS_OLD:
Deepa Dinamani887feae2019-02-02 07:34:50 -08001400 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1401 break;
1402
1403 case SO_TIMESTAMP_NEW:
1404 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1405 break;
1406
1407 case SO_TIMESTAMPNS_NEW:
1408 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001409 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Deepa Dinamani7f1bc6e2019-02-02 07:34:46 -08001411 case SO_TIMESTAMPING_OLD:
Willem de Bruijnb9f40e22014-08-04 22:11:46 -04001412 v.val = sk->sk_tsflags;
Patrick Ohly20d49472009-02-12 05:03:38 +00001413 break;
1414
Deepa Dinamania9beb862019-02-02 07:34:54 -08001415 case SO_RCVTIMEO_OLD:
1416 case SO_RCVTIMEO_NEW:
1417 lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001418 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
Deepa Dinamania9beb862019-02-02 07:34:54 -08001420 case SO_SNDTIMEO_OLD:
1421 case SO_SNDTIMEO_NEW:
1422 lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001423 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001425 case SO_RCVLOWAT:
1426 v.val = sk->sk_rcvlowat;
1427 break;
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001428
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001429 case SO_SNDLOWAT:
Eric Dumazet2a915252009-05-27 11:30:05 +00001430 v.val = 1;
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001431 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001433 case SO_PASSCRED:
Eric Dumazet82981932012-04-26 20:07:59 +00001434 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001435 break;
1436
1437 case SO_PEERCRED:
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001438 {
1439 struct ucred peercred;
1440 if (len > sizeof(peercred))
1441 len = sizeof(peercred);
1442 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1443 if (copy_to_user(optval, &peercred, len))
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001444 return -EFAULT;
1445 goto lenout;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001446 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001447
David Herrmann28b5ba2a2017-06-21 10:47:15 +02001448 case SO_PEERGROUPS:
1449 {
1450 int ret, n;
1451
1452 if (!sk->sk_peer_cred)
1453 return -ENODATA;
1454
1455 n = sk->sk_peer_cred->group_info->ngroups;
1456 if (len < n * sizeof(gid_t)) {
1457 len = n * sizeof(gid_t);
1458 return put_user(len, optlen) ? -EFAULT : -ERANGE;
1459 }
1460 len = n * sizeof(gid_t);
1461
1462 ret = groups_to_user((gid_t __user *)optval,
1463 sk->sk_peer_cred->group_info);
1464 if (ret)
1465 return ret;
1466 goto lenout;
1467 }
1468
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001469 case SO_PEERNAME:
1470 {
1471 char address[128];
1472
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +01001473 lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1474 if (lv < 0)
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001475 return -ENOTCONN;
1476 if (lv < len)
1477 return -EINVAL;
1478 if (copy_to_user(optval, address, len))
1479 return -EFAULT;
1480 goto lenout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001482
1483 /* Dubious BSD thing... Probably nobody even uses it, but
1484 * the UNIX standard wants it for whatever reason... -DaveM
1485 */
1486 case SO_ACCEPTCONN:
1487 v.val = sk->sk_state == TCP_LISTEN;
1488 break;
1489
1490 case SO_PASSSEC:
Eric Dumazet82981932012-04-26 20:07:59 +00001491 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001492 break;
1493
1494 case SO_PEERSEC:
1495 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1496
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -08001497 case SO_MARK:
1498 v.val = sk->sk_mark;
1499 break;
1500
Neil Horman3b885782009-10-12 13:26:31 -07001501 case SO_RXQ_OVFL:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001502 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
Neil Horman3b885782009-10-12 13:26:31 -07001503 break;
1504
Johannes Berg6e3e9392011-11-09 10:15:42 +01001505 case SO_WIFI_STATUS:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001506 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
Johannes Berg6e3e9392011-11-09 10:15:42 +01001507 break;
1508
Pavel Emelyanovef64a542012-02-21 07:31:34 +00001509 case SO_PEEK_OFF:
1510 if (!sock->ops->set_peek_off)
1511 return -EOPNOTSUPP;
1512
1513 v.val = sk->sk_peek_off;
1514 break;
David S. Millerbc2f7992012-02-24 14:48:34 -05001515 case SO_NOFCS:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001516 v.val = sock_flag(sk, SOCK_NOFCS);
David S. Millerbc2f7992012-02-24 14:48:34 -05001517 break;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001518
Pavel Emelyanovf7b86bf2012-10-18 23:55:56 +00001519 case SO_BINDTODEVICE:
Brian Haleyc91f6df2012-11-26 05:21:08 +00001520 return sock_getbindtodevice(sk, optval, optlen, len);
1521
Pavel Emelyanova8fc9272012-11-01 02:01:48 +00001522 case SO_GET_FILTER:
1523 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1524 if (len < 0)
1525 return len;
1526
1527 goto lenout;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001528
Vincent Bernatd59577b2013-01-16 22:55:49 +01001529 case SO_LOCK_FILTER:
1530 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1531 break;
1532
Michal Sekletarea02f942014-01-17 17:09:45 +01001533 case SO_BPF_EXTENSIONS:
1534 v.val = bpf_tell_extensions();
1535 break;
1536
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00001537 case SO_SELECT_ERR_QUEUE:
1538 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1539 break;
1540
Cong Wange0d10952013-08-01 11:10:25 +08001541#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03001542 case SO_BUSY_POLL:
Eliezer Tamirdafcc432013-06-14 16:33:57 +03001543 v.val = sk->sk_ll_usec;
1544 break;
1545#endif
1546
Eric Dumazet62748f32013-09-24 08:20:52 -07001547 case SO_MAX_PACING_RATE:
Eric Dumazet677f1362019-02-28 15:17:28 -08001548 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
1549 lv = sizeof(v.ulval);
1550 v.ulval = sk->sk_max_pacing_rate;
1551 } else {
1552 /* 32bit version */
1553 v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
1554 }
Eric Dumazet62748f32013-09-24 08:20:52 -07001555 break;
1556
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001557 case SO_INCOMING_CPU:
Eric Dumazet7170a972019-10-30 13:00:04 -07001558 v.val = READ_ONCE(sk->sk_incoming_cpu);
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001559 break;
1560
Josh Hunta2d133b2017-03-20 15:22:03 -04001561 case SO_MEMINFO:
1562 {
1563 u32 meminfo[SK_MEMINFO_VARS];
1564
Josh Hunta2d133b2017-03-20 15:22:03 -04001565 sk_get_meminfo(sk, meminfo);
1566
1567 len = min_t(unsigned int, len, sizeof(meminfo));
1568 if (copy_to_user(optval, &meminfo, len))
1569 return -EFAULT;
1570
1571 goto lenout;
1572 }
Sridhar Samudrala6d433902017-03-24 10:08:36 -07001573
1574#ifdef CONFIG_NET_RX_BUSY_POLL
1575 case SO_INCOMING_NAPI_ID:
1576 v.val = READ_ONCE(sk->sk_napi_id);
1577
1578 /* aggregate non-NAPI IDs down to 0 */
1579 if (v.val < MIN_NAPI_ID)
1580 v.val = 0;
1581
1582 break;
1583#endif
1584
Chenbo Feng5daab9d2017-04-05 19:00:55 -07001585 case SO_COOKIE:
1586 lv = sizeof(u64);
1587 if (len < lv)
1588 return -EINVAL;
1589 v.val64 = sock_gen_cookie(sk);
1590 break;
1591
Willem de Bruijn76851d12017-08-03 16:29:40 -04001592 case SO_ZEROCOPY:
1593 v.val = sock_flag(sk, SOCK_ZEROCOPY);
1594 break;
1595
Richard Cochran80b14de2018-07-03 15:42:48 -07001596 case SO_TXTIME:
1597 lv = sizeof(v.txtime);
1598 v.txtime.clockid = sk->sk_clockid;
1599 v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1600 SOF_TXTIME_DEADLINE_MODE : 0;
Jesus Sanchez-Palencia4b15c702018-07-03 15:43:00 -07001601 v.txtime.flags |= sk->sk_txtime_report_errors ?
1602 SOF_TXTIME_REPORT_ERRORS : 0;
Richard Cochran80b14de2018-07-03 15:42:48 -07001603 break;
1604
David Herrmannf5dd3d02019-01-15 14:42:14 +01001605 case SO_BINDTOIFINDEX:
1606 v.val = sk->sk_bound_dev_if;
1607 break;
1608
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001609 default:
YOSHIFUJI Hideaki/吉藤英明443b5992015-03-23 18:04:13 +09001610		/* We implement SO_SNDLOWAT etc. to not be settable
1611 * (1003.1g 7).
1612 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001613 return -ENOPROTOOPT;
1614 }
1615
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 if (len > lv)
1617 len = lv;
1618 if (copy_to_user(optval, &v, len))
1619 return -EFAULT;
1620lenout:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001621 if (put_user(len, optlen))
1622 return -EFAULT;
1623 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624}
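
/* Illustrative userspace sketch (not part of this file): consuming the
 * SO_PEERCRED answer built by cred_to_ucred() above. Assumes a connected
 * AF_UNIX socket fd and struct ucred from <sys/socket.h>; error handling
 * is abbreviated.
 *
 *	struct ucred uc;
 *	socklen_t len = sizeof(uc);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &uc, &len) == 0)
 *		printf("peer pid=%d uid=%u gid=%u\n", uc.pid, uc.uid, uc.gid);
 */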
1625
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001626/*
1627 * Initialize an sk_lock.
1628 *
1629 * (We also register the sk_lock with the lock validator.)
1630 */
Dave Jonesb6f99a22007-03-22 12:27:49 -07001631static inline void sock_lock_init(struct sock *sk)
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001632{
David Howellscdfbabf2017-03-09 08:09:05 +00001633 if (sk->sk_kern_sock)
1634 sock_lock_init_class_and_name(
1635 sk,
1636 af_family_kern_slock_key_strings[sk->sk_family],
1637 af_family_kern_slock_keys + sk->sk_family,
1638 af_family_kern_key_strings[sk->sk_family],
1639 af_family_kern_keys + sk->sk_family);
1640 else
1641 sock_lock_init_class_and_name(
1642 sk,
Peter Zijlstraed075362006-12-06 20:35:24 -08001643 af_family_slock_key_strings[sk->sk_family],
1644 af_family_slock_keys + sk->sk_family,
1645 af_family_key_strings[sk->sk_family],
1646 af_family_keys + sk->sk_family);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001647}
1648
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001649/*
1650 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1651 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
Eric Dumazet68835ab2010-11-30 19:04:07 +00001652 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001653 */
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001654static void sock_copy(struct sock *nsk, const struct sock *osk)
1655{
Jakub Sitnickib8e202d2020-02-18 17:10:13 +00001656 const struct proto *prot = READ_ONCE(osk->sk_prot);
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001657#ifdef CONFIG_SECURITY_NETWORK
1658 void *sptr = nsk->sk_security;
1659#endif
Eric Dumazet68835ab2010-11-30 19:04:07 +00001660 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1661
1662 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
Jakub Sitnickib8e202d2020-02-18 17:10:13 +00001663 prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
Eric Dumazet68835ab2010-11-30 19:04:07 +00001664
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001665#ifdef CONFIG_SECURITY_NETWORK
1666 nsk->sk_security = sptr;
1667 security_sk_clone(osk, nsk);
1668#endif
1669}
1670
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001671static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1672 int family)
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001673{
1674 struct sock *sk;
1675 struct kmem_cache *slab;
1676
1677 slab = prot->slab;
Eric Dumazete912b112009-07-08 19:36:05 +00001678 if (slab != NULL) {
1679 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1680 if (!sk)
1681 return sk;
Alexander Potapenko64713842019-07-11 20:59:19 -07001682 if (want_init_on_alloc(priority))
Eric Dumazetba2489b2016-08-23 11:39:29 -07001683 sk_prot_clear_nulls(sk, prot->obj_size);
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001684 } else
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001685 sk = kmalloc(prot->obj_size, priority);
1686
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001687 if (sk != NULL) {
1688 if (security_sk_alloc(sk, family, priority))
1689 goto out_free;
1690
1691 if (!try_module_get(prot->owner))
1692 goto out_free_sec;
Krishna Kumare022f0b2009-10-19 23:46:20 +00001693 sk_tx_queue_clear(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001694 }
1695
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001696 return sk;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001697
1698out_free_sec:
1699 security_sk_free(sk);
1700out_free:
1701 if (slab != NULL)
1702 kmem_cache_free(slab, sk);
1703 else
1704 kfree(sk);
1705 return NULL;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001706}
1707
1708static void sk_prot_free(struct proto *prot, struct sock *sk)
1709{
1710 struct kmem_cache *slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001711 struct module *owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001712
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001713 owner = prot->owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001714 slab = prot->slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001715
Tejun Heobd1060a2015-12-07 17:38:53 -05001716 cgroup_sk_free(&sk->sk_cgrp_data);
Johannes Weiner2d758072016-10-07 17:00:58 -07001717 mem_cgroup_sk_free(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001718 security_sk_free(sk);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001719 if (slab != NULL)
1720 kmem_cache_free(slab, sk);
1721 else
1722 kfree(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001723 module_put(owner);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001724}
1725
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726/**
1727 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001728 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001729 * @family: protocol family
1730 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1731 * @prot: struct proto associated with this new sock instance
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001732 * @kern: is this to be a kernel socket?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001734struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001735 struct proto *prot, int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001737 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001739 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001741 sk->sk_family = family;
1742 /*
1743 * See comment in struct sock definition to understand
1744 * why we need sk_prot_creator -acme
1745 */
1746 sk->sk_prot = sk->sk_prot_creator = prot;
David Howellscdfbabf2017-03-09 08:09:05 +00001747 sk->sk_kern_sock = kern;
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001748 sock_lock_init(sk);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001749 sk->sk_net_refcnt = kern ? 0 : 1;
Tonghao Zhang648845a2017-12-14 05:51:58 -08001750 if (likely(sk->sk_net_refcnt)) {
Eric W. Biederman26abe142015-05-08 21:10:31 -05001751 get_net(net);
Tonghao Zhang648845a2017-12-14 05:51:58 -08001752 sock_inuse_add(net, 1);
1753 }
1754
Eric W. Biederman26abe142015-05-08 21:10:31 -05001755 sock_net_set(sk, net);
Reshetova, Elena14afee42017-06-30 13:08:00 +03001756 refcount_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001757
Johannes Weiner2d758072016-10-07 17:00:58 -07001758 mem_cgroup_sk_alloc(sk);
Johannes Weinerd979a392016-09-19 14:44:38 -07001759 cgroup_sk_alloc(&sk->sk_cgrp_data);
Tejun Heo2a56a1f2015-12-07 17:38:52 -05001760 sock_update_classid(&sk->sk_cgrp_data);
1761 sock_update_netprioidx(&sk->sk_cgrp_data);
Tariq Toukan41b14fb2020-06-22 23:26:04 +03001762 sk_tx_queue_clear(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 }
Frank Filza79af592005-09-27 15:23:38 -07001764
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001765 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766}
Eric Dumazet2a915252009-05-27 11:30:05 +00001767EXPORT_SYMBOL(sk_alloc);
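
/* Sketch of a typical caller, simplified from a protocol family's ->create()
 * hook; the PF_INET/tcp_prot pairing here is only an example:
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &tcp_prot, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */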
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Eric Dumazeta4298e42016-04-01 08:52:12 -07001769/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1770 * grace period. This is the case for UDP sockets and TCP listeners.
1771 */
1772static void __sk_destruct(struct rcu_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773{
Eric Dumazeta4298e42016-04-01 08:52:12 -07001774 struct sock *sk = container_of(head, struct sock, sk_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777 if (sk->sk_destruct)
1778 sk->sk_destruct(sk);
1779
Paul E. McKenneya898def2010-02-22 17:04:49 -08001780 filter = rcu_dereference_check(sk->sk_filter,
Reshetova, Elena14afee42017-06-30 13:08:00 +03001781 refcount_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001783 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001784 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 }
1786
Eric Dumazet08e29af2011-11-28 12:04:18 +00001787 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Martin KaFai Lau6ac99e82019-04-26 16:39:39 -07001789#ifdef CONFIG_BPF_SYSCALL
1790 bpf_sk_storage_free(sk);
1791#endif
1792
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001794 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1795 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Eric Dumazet22a0e182017-03-15 13:21:28 -07001797 if (sk->sk_frag.page) {
1798 put_page(sk->sk_frag.page);
1799 sk->sk_frag.page = NULL;
1800 }
1801
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001802 if (sk->sk_peer_cred)
1803 put_cred(sk->sk_peer_cred);
1804 put_pid(sk->sk_peer_pid);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001805 if (likely(sk->sk_net_refcnt))
1806 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001807 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001809
Eric Dumazeta4298e42016-04-01 08:52:12 -07001810void sk_destruct(struct sock *sk)
1811{
Martin KaFai Lau8c7138b2019-09-27 16:00:31 -07001812 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
1813
1814 if (rcu_access_pointer(sk->sk_reuseport_cb)) {
1815 reuseport_detach_sock(sk);
1816 use_call_rcu = true;
1817 }
1818
1819 if (use_call_rcu)
Eric Dumazeta4298e42016-04-01 08:52:12 -07001820 call_rcu(&sk->sk_rcu, __sk_destruct);
1821 else
1822 __sk_destruct(&sk->sk_rcu);
1823}
1824
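/* Sketch: protocols whose lookups are lockless opt in to the RCU-deferred
 * free path above by setting the flag once at socket setup time, e.g.:
 *
 *	sock_set_flag(sk, SOCK_RCU_FREE);
 */
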
Craig Gallekeb4cb002015-06-15 11:26:18 -04001825static void __sk_free(struct sock *sk)
1826{
Tonghao Zhang648845a2017-12-14 05:51:58 -08001827 if (likely(sk->sk_net_refcnt))
1828 sock_inuse_add(sock_net(sk), -1);
1829
Eric Dumazet97090202018-05-18 04:47:55 -07001830 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
Craig Gallekeb4cb002015-06-15 11:26:18 -04001831 sock_diag_broadcast_destroy(sk);
1832 else
1833 sk_destruct(sk);
1834}
1835
Eric Dumazet2b85a342009-06-11 02:55:43 -07001836void sk_free(struct sock *sk)
1837{
1838 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001839	 * We subtract one from sk_wmem_alloc so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001840	 * some packets are still in a tx queue.
 1841	 * If the count is not zero, sock_wfree() will call __sk_free(sk) later.
1842 */
Reshetova, Elena14afee42017-06-30 13:08:00 +03001843 if (refcount_dec_and_test(&sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001844 __sk_free(sk);
1845}
Eric Dumazet2a915252009-05-27 11:30:05 +00001846EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847
Paolo Abeni581319c2017-03-09 13:54:08 +01001848static void sk_init_common(struct sock *sk)
1849{
1850 skb_queue_head_init(&sk->sk_receive_queue);
1851 skb_queue_head_init(&sk->sk_write_queue);
1852 skb_queue_head_init(&sk->sk_error_queue);
1853
1854 rwlock_init(&sk->sk_callback_lock);
1855 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1856 af_rlock_keys + sk->sk_family,
1857 af_family_rlock_key_strings[sk->sk_family]);
1858 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1859 af_wlock_keys + sk->sk_family,
1860 af_family_wlock_key_strings[sk->sk_family]);
1861 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
1862 af_elock_keys + sk->sk_family,
1863 af_family_elock_key_strings[sk->sk_family]);
1864 lockdep_set_class_and_name(&sk->sk_callback_lock,
1865 af_callback_keys + sk->sk_family,
1866 af_family_clock_key_strings[sk->sk_family]);
1867}
1868
Eric Dumazete56c57d2011-11-08 17:07:07 -05001869/**
1870 * sk_clone_lock - clone a socket, and lock its clone
1871 * @sk: the socket to clone
1872 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1873 *
1874 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1875 */
1876struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001877{
Jakub Sitnickib8e202d2020-02-18 17:10:13 +00001878 struct proto *prot = READ_ONCE(sk->sk_prot);
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001879 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001880 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001881
Jakub Sitnickib8e202d2020-02-18 17:10:13 +00001882 newsk = sk_prot_alloc(prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001883 if (newsk != NULL) {
1884 struct sk_filter *filter;
1885
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001886 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001887
Jakub Sitnickib8e202d2020-02-18 17:10:13 +00001888 newsk->sk_prot_creator = prot;
Christoph Paasch9d538fa2017-09-26 17:38:50 -07001889
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001890 /* SANITY */
Sowmini Varadhan8a681732015-07-30 15:50:36 +02001891 if (likely(newsk->sk_net_refcnt))
1892 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001893 sk_node_init(&newsk->sk_node);
1894 sock_lock_init(newsk);
1895 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001896 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001897 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001898
1899 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001900 /*
1901 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1902 */
Reshetova, Elena14afee42017-06-30 13:08:00 +03001903 refcount_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001904 atomic_set(&newsk->sk_omem_alloc, 0);
Paolo Abeni581319c2017-03-09 13:54:08 +01001905 sk_init_common(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001906
1907 newsk->sk_dst_cache = NULL;
Julian Anastasov9b8805a2017-02-06 23:14:11 +02001908 newsk->sk_dst_pending_confirm = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001909 newsk->sk_wmem_queued = 0;
1910 newsk->sk_forward_alloc = 0;
Eric Dumazet9caad862016-04-01 08:52:20 -07001911 atomic_set(&newsk->sk_drops, 0);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001912 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001913 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
Willem de Bruijn52267792017-08-03 16:29:39 -04001914 atomic_set(&newsk->sk_zckey, 0);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001915
1916 sock_reset_flag(newsk, SOCK_DONE);
Shakeel Buttd752a492020-03-09 22:16:06 -07001917
1918 /* sk->sk_memcg will be populated at accept() time */
1919 newsk->sk_memcg = NULL;
1920
Cong Wangad0f75e2020-07-02 11:52:56 -07001921 cgroup_sk_clone(&newsk->sk_cgrp_data);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001922
Eric Dumazeteefca202017-10-02 12:20:51 -07001923 rcu_read_lock();
1924 filter = rcu_dereference(sk->sk_filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001925 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001926			/* Though it's an empty new sock, the charging may fail
 1927			 * if sysctl_optmem_max was changed between creation of the
 1928			 * original socket and this clone.
1929 */
1930 is_charged = sk_filter_charge(newsk, filter);
Eric Dumazeteefca202017-10-02 12:20:51 -07001931 RCU_INIT_POINTER(newsk->sk_filter, filter);
1932 rcu_read_unlock();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001933
Eric Dumazetd188ba82015-12-08 07:22:02 -08001934 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
Daniel Borkmanna97e50c2017-03-22 13:08:08 +01001935 /* We need to make sure that we don't uncharge the new
1936 * socket if we couldn't charge it in the first place
1937 * as otherwise we uncharge the parent's filter.
1938 */
1939 if (!is_charged)
1940 RCU_INIT_POINTER(newsk->sk_filter, NULL);
Arnaldo Carvalho de Melo94352d42017-03-01 16:35:08 -03001941 sk_free_unlock_clone(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001942 newsk = NULL;
1943 goto out;
1944 }
Craig Gallekfa463492016-02-10 11:50:39 -05001945 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
Stanislav Fomichev8f51dfc2019-08-14 10:37:49 -07001946
1947 if (bpf_sk_storage_clone(sk, newsk)) {
1948 sk_free_unlock_clone(newsk);
1949 newsk = NULL;
1950 goto out;
1951 }
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001952
Jakub Sitnickif1ff5ce2020-02-18 17:10:14 +00001953 /* Clear sk_user_data if parent had the pointer tagged
1954 * as not suitable for copying when cloning.
1955 */
1956 if (sk_user_data_is_nocopy(newsk))
Jakub Sitnicki7a1ca972020-04-02 14:55:24 +02001957 newsk->sk_user_data = NULL;
Jakub Sitnickif1ff5ce2020-02-18 17:10:14 +00001958
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001959 newsk->sk_err = 0;
Eric Dumazete551c322016-10-28 13:40:24 -07001960 newsk->sk_err_soft = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001961 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001962 newsk->sk_incoming_cpu = raw_smp_processor_id();
Tonghao Zhang648845a2017-12-14 05:51:58 -08001963 if (likely(newsk->sk_net_refcnt))
1964 sock_inuse_add(sock_net(newsk), 1);
Johannes Weinerd979a392016-09-19 14:44:38 -07001965
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001966 /*
1967 * Before updating sk_refcnt, we must commit prior changes to memory
Mauro Carvalho Chehab2cdb54c2020-04-21 19:04:05 +02001968 * (Documentation/RCU/rculist_nulls.rst for details)
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001969 */
1970 smp_wmb();
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001971 refcount_set(&newsk->sk_refcnt, 2);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001972
1973 /*
1974 * Increment the counter in the same struct proto as the master
1975 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1976 * is the same as sk->sk_prot->socks, as this field was copied
1977 * with memcpy).
1978 *
1979 * This _changes_ the previous behaviour, where
1980 * tcp_create_openreq_child always was incrementing the
1981 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1982 * to be taken into account in all callers. -acme
1983 */
1984 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001985 sk_set_socket(newsk, NULL);
Tariq Toukan41b14fb2020-06-22 23:26:04 +03001986 sk_tx_queue_clear(newsk);
Li RongQingc2f26e82019-02-22 17:08:22 +08001987 RCU_INIT_POINTER(newsk->sk_wq, NULL);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001988
1989 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001990 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001991
Hannes Frederic Sowa080a2702015-10-26 13:51:37 +01001992 if (sock_needs_netstamp(sk) &&
1993 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001994 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001995 }
1996out:
1997 return newsk;
1998}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001999EXPORT_SYMBOL_GPL(sk_clone_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07002000
Arnaldo Carvalho de Melo94352d42017-03-01 16:35:08 -03002001void sk_free_unlock_clone(struct sock *sk)
2002{
 2003	/* It is still a raw copy of the parent, so invalidate the
 2004	 * destructor and do a plain sk_free() */
2005 sk->sk_destruct = NULL;
2006 bh_unlock_sock(sk);
2007 sk_free(sk);
2008}
2009EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
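
/* Caller-side sketch for the two helpers above: the clone comes back locked,
 * and every path must either unlock it or dispose of it via
 * sk_free_unlock_clone(); my_proto_init() is a hypothetical setup step:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (!newsk)
 *		return NULL;
 *	if (my_proto_init(newsk)) {
 *		sk_free_unlock_clone(newsk);
 *		return NULL;
 *	}
 *	bh_unlock_sock(newsk);
 */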
2010
Andi Kleen99580892007-04-20 17:12:43 -07002011void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2012{
Eric Dumazetd6a4e262015-05-26 08:55:28 -07002013 u32 max_segs = 1;
2014
Eric Dumazet6bd4f352015-12-02 21:53:57 -08002015 sk_dst_set(sk, dst);
Eric Dumazet0a6b2a12018-02-19 11:56:47 -08002016 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
Andi Kleen99580892007-04-20 17:12:43 -07002017 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07002018 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07002019 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07002020 if (sk_can_gso(sk)) {
Steffen Klassertf70f2502017-08-01 12:49:10 +03002021 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
Andi Kleen99580892007-04-20 17:12:43 -07002022 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002023 } else {
Andi Kleen99580892007-04-20 17:12:43 -07002024 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002025 sk->sk_gso_max_size = dst->dev->gso_max_size;
Eric Dumazetd6a4e262015-05-26 08:55:28 -07002026 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002027 }
Andi Kleen99580892007-04-20 17:12:43 -07002028 }
Eric Dumazetd6a4e262015-05-26 08:55:28 -07002029 sk->sk_gso_max_segs = max_segs;
Andi Kleen99580892007-04-20 17:12:43 -07002030}
2031EXPORT_SYMBOL_GPL(sk_setup_caps);
2032
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033/*
2034 * Simple resource managers for sockets.
2035 */
2036
2037
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002038/*
2039 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 */
2041void sock_wfree(struct sk_buff *skb)
2042{
2043 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00002044 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
Eric Dumazetd99927f2009-09-24 10:49:24 +00002046 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2047 /*
 2048		 * Keep a reference on sk_wmem_alloc; it will be released
 2049		 * after the sk_write_space() call.
2050 */
Reshetova, Elena14afee42017-06-30 13:08:00 +03002051 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00002053 len = 1;
2054 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07002055 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00002056 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2057 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07002058 */
Reshetova, Elena14afee42017-06-30 13:08:00 +03002059 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07002060 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061}
Eric Dumazet2a915252009-05-27 11:30:05 +00002062EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
Eric Dumazet1d2077a2016-05-02 10:56:27 -07002064/* This variant of sock_wfree() is used by TCP,
2065 * since it sets SOCK_USE_WRITE_QUEUE.
2066 */
2067void __sock_wfree(struct sk_buff *skb)
2068{
2069 struct sock *sk = skb->sk;
2070
Reshetova, Elena14afee42017-06-30 13:08:00 +03002071 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
Eric Dumazet1d2077a2016-05-02 10:56:27 -07002072 __sk_free(sk);
2073}
2074
Eric Dumazet9e17f8a2015-11-01 15:36:55 -08002075void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2076{
2077 skb_orphan(skb);
2078 skb->sk = sk;
2079#ifdef CONFIG_INET
2080 if (unlikely(!sk_fullsock(sk))) {
2081 skb->destructor = sock_edemux;
2082 sock_hold(sk);
2083 return;
2084 }
2085#endif
2086 skb->destructor = sock_wfree;
2087 skb_set_hash_from_sk(skb, sk);
2088 /*
 2089	 * We used to take a refcount on sk, but the following operation
 2090	 * is enough to guarantee sk_free() won't free this sock until
2091 * all in-flight packets are completed
2092 */
Reshetova, Elena14afee42017-06-30 13:08:00 +03002093 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
Eric Dumazet9e17f8a2015-11-01 15:36:55 -08002094}
2095EXPORT_SYMBOL(skb_set_owner_w);
2096
Jakub Kicinski41477662019-08-07 17:03:59 -07002097static bool can_skb_orphan_partial(const struct sk_buff *skb)
2098{
2099#ifdef CONFIG_TLS_DEVICE
 2100	/* Drivers depend on in-order delivery for crypto offload;
 2101	 * a partial orphan breaks the out-of-order-OK logic.
2102 */
2103 if (skb->decrypted)
2104 return false;
2105#endif
2106 return (skb->destructor == sock_wfree ||
2107 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2108}
2109
Eric Dumazet1d2077a2016-05-02 10:56:27 -07002110/* This helper is used by netem, as it can hold packets in its
2111 * delay queue. We want to allow the owner socket to send more
2112 * packets, as if they were already TX completed by a typical driver.
2113 * But we also want to keep skb->sk set because some packet schedulers
Eric Dumazetf6ba8d32017-05-11 15:24:41 -07002114 * rely on it (sch_fq for example).
Eric Dumazet1d2077a2016-05-02 10:56:27 -07002115 */
Eric Dumazetf2f872f2013-07-30 17:55:08 -07002116void skb_orphan_partial(struct sk_buff *skb)
2117{
Eric Dumazetf6ba8d32017-05-11 15:24:41 -07002118 if (skb_is_tcp_pure_ack(skb))
Eric Dumazet1d2077a2016-05-02 10:56:27 -07002119 return;
2120
Jakub Kicinski41477662019-08-07 17:03:59 -07002121 if (can_skb_orphan_partial(skb)) {
Eric Dumazetf6ba8d32017-05-11 15:24:41 -07002122 struct sock *sk = skb->sk;
2123
Reshetova, Elena41c6d652017-06-30 13:08:01 +03002124 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
Reshetova, Elena14afee42017-06-30 13:08:00 +03002125 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
Eric Dumazetf6ba8d32017-05-11 15:24:41 -07002126 skb->destructor = sock_efree;
2127 }
Eric Dumazetf2f872f2013-07-30 17:55:08 -07002128 } else {
2129 skb_orphan(skb);
2130 }
2131}
2132EXPORT_SYMBOL(skb_orphan_partial);
2133
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002134/*
2135 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 */
2137void sock_rfree(struct sk_buff *skb)
2138{
2139 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00002140 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
Eric Dumazetd361fd52010-07-10 22:45:17 +00002142 atomic_sub(len, &sk->sk_rmem_alloc);
2143 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144}
Eric Dumazet2a915252009-05-27 11:30:05 +00002145EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
Oliver Hartkopp7768eed2015-03-10 19:03:46 +01002147/*
2148 * Buffer destructor for skbs that are not used directly in read or write
2149 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2150 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04002151void sock_efree(struct sk_buff *skb)
2152{
2153 sock_put(skb->sk);
2154}
2155EXPORT_SYMBOL(sock_efree);
2156
Joe Stringercf7fbe62020-03-29 15:53:38 -07002157/* Buffer destructor for prefetch/receive path where reference count may
2158 * not be held, e.g. for listen sockets.
2159 */
2160#ifdef CONFIG_INET
2161void sock_pfree(struct sk_buff *skb)
2162{
Joe Stringer7ae215d2020-03-29 15:53:40 -07002163 if (sk_is_refcounted(skb->sk))
2164 sock_gen_put(skb->sk);
Joe Stringercf7fbe62020-03-29 15:53:38 -07002165}
2166EXPORT_SYMBOL(sock_pfree);
2167#endif /* CONFIG_INET */
2168
Eric W. Biederman976d02012012-05-23 17:16:53 -06002169kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170{
Eric W. Biederman976d02012012-05-23 17:16:53 -06002171 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
Eric Dumazetf064af12010-09-22 12:43:39 +00002173 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06002174 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00002175 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 return uid;
2177}
Eric Dumazet2a915252009-05-27 11:30:05 +00002178EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
2180unsigned long sock_i_ino(struct sock *sk)
2181{
2182 unsigned long ino;
2183
Eric Dumazetf064af12010-09-22 12:43:39 +00002184 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00002186 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 return ino;
2188}
Eric Dumazet2a915252009-05-27 11:30:05 +00002189EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
2191/*
2192 * Allocate a skb from the socket's send buffer.
2193 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07002194struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01002195 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
Eric Dumazete292f052019-10-10 20:17:45 -07002197 if (force ||
2198 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
Eric Dumazet2a915252009-05-27 11:30:05 +00002199 struct sk_buff *skb = alloc_skb(size, priority);
Eric Dumazete292f052019-10-10 20:17:45 -07002200
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 if (skb) {
2202 skb_set_owner_w(skb, sk);
2203 return skb;
2204 }
2205 }
2206 return NULL;
2207}
Eric Dumazet2a915252009-05-27 11:30:05 +00002208EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Willem de Bruijn98ba0bd2017-08-03 16:29:37 -04002210static void sock_ofree(struct sk_buff *skb)
2211{
2212 struct sock *sk = skb->sk;
2213
2214 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2215}
2216
2217struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2218 gfp_t priority)
2219{
2220 struct sk_buff *skb;
2221
2222 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2223 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2224 sysctl_optmem_max)
2225 return NULL;
2226
2227 skb = alloc_skb(size, priority);
2228 if (!skb)
2229 return NULL;
2230
2231 atomic_add(skb->truesize, &sk->sk_omem_alloc);
2232 skb->sk = sk;
2233 skb->destructor = sock_ofree;
2234 return skb;
2235}
2236
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002239 */
Al Virodd0fc662005-10-07 07:46:04 +01002240void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241{
Eric Dumazet95c96172012-04-15 05:58:06 +00002242 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2244 void *mem;
2245 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002246 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 */
2248 atomic_add(size, &sk->sk_omem_alloc);
2249 mem = kmalloc(size, priority);
2250 if (mem)
2251 return mem;
2252 atomic_sub(size, &sk->sk_omem_alloc);
2253 }
2254 return NULL;
2255}
Eric Dumazet2a915252009-05-27 11:30:05 +00002256EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
Daniel Borkmann79e88652014-11-19 17:13:11 +01002258/* Free an option memory block. Note, we actually want the inline
2259 * here as this allows gcc to detect the nullify and fold away the
2260 * condition entirely.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 */
Daniel Borkmann79e88652014-11-19 17:13:11 +01002262static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2263 const bool nullify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264{
David S. Millere53da5f2014-10-14 17:02:37 -04002265 if (WARN_ON_ONCE(!mem))
2266 return;
Daniel Borkmann79e88652014-11-19 17:13:11 +01002267 if (nullify)
Waiman Long453431a2020-08-06 23:18:13 -07002268 kfree_sensitive(mem);
Daniel Borkmann79e88652014-11-19 17:13:11 +01002269 else
2270 kfree(mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 atomic_sub(size, &sk->sk_omem_alloc);
2272}
Daniel Borkmann79e88652014-11-19 17:13:11 +01002273
2274void sock_kfree_s(struct sock *sk, void *mem, int size)
2275{
2276 __sock_kfree_s(sk, mem, size, false);
2277}
Eric Dumazet2a915252009-05-27 11:30:05 +00002278EXPORT_SYMBOL(sock_kfree_s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
Daniel Borkmann79e88652014-11-19 17:13:11 +01002280void sock_kzfree_s(struct sock *sk, void *mem, int size)
2281{
2282 __sock_kfree_s(sk, mem, size, true);
2283}
2284EXPORT_SYMBOL(sock_kzfree_s);
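
/* Pairing sketch: option memory must be released with the same size it was
 * charged with; use sock_kzfree_s() when the buffer may hold key material:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 */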
2285
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 2287   I think these locks should be removed for datagram sockets.
2288 */
Eric Dumazet2a915252009-05-27 11:30:05 +00002289static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290{
2291 DEFINE_WAIT(wait);
2292
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002293 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 for (;;) {
2295 if (!timeo)
2296 break;
2297 if (signal_pending(current))
2298 break;
2299 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00002300 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Eric Dumazete292f052019-10-10 20:17:45 -07002301 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 break;
2303 if (sk->sk_shutdown & SEND_SHUTDOWN)
2304 break;
2305 if (sk->sk_err)
2306 break;
2307 timeo = schedule_timeout(timeo);
2308 }
Eric Dumazetaa395142010-04-20 13:03:51 +00002309 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 return timeo;
2311}
2312
2313
2314/*
2315 * Generic send/receive buffer handlers
2316 */
2317
Herbert Xu4cc7f682009-02-04 16:55:54 -08002318struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2319 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07002320 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07002322 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 long timeo;
2324 int err;
2325
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07002327 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 err = sock_error(sk);
2329 if (err != 0)
2330 goto failure;
2331
2332 err = -EPIPE;
2333 if (sk->sk_shutdown & SEND_SHUTDOWN)
2334 goto failure;
2335
Eric Dumazete292f052019-10-10 20:17:45 -07002336 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
Eric Dumazet2e4e4412014-09-17 04:49:49 -07002337 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07002338
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002339 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07002340 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2341 err = -EAGAIN;
2342 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07002344 if (signal_pending(current))
2345 goto interrupted;
2346 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07002348 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2349 errcode, sk->sk_allocation);
2350 if (skb)
2351 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 return skb;
2353
2354interrupted:
2355 err = sock_intr_errno(timeo);
2356failure:
2357 *errcode = err;
2358 return NULL;
2359}
Herbert Xu4cc7f682009-02-04 16:55:54 -08002360EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002362struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 int noblock, int *errcode)
2364{
Eric Dumazet28d64272013-08-08 14:38:47 -07002365 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366}
Eric Dumazet2a915252009-05-27 11:30:05 +00002367EXPORT_SYMBOL(sock_alloc_send_skb);
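
/* Usage sketch for the two allocators above, as a datagram send path might
 * call them (header in the linear area, payload in page frags); hlen, dlen
 * and max_order are illustrative only:
 *
 *	skb = sock_alloc_send_pskb(sk, hlen, dlen, noblock, &err, max_order);
 *	if (!skb)
 *		return err;
 */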
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Willem de Bruijn39771b12016-04-02 23:08:06 -04002369int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2370 struct sockcm_cookie *sockc)
2371{
Soheil Hassas Yeganeh3dd17e62016-04-02 23:08:09 -04002372 u32 tsflags;
2373
Willem de Bruijn39771b12016-04-02 23:08:06 -04002374 switch (cmsg->cmsg_type) {
2375 case SO_MARK:
2376 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2377 return -EPERM;
2378 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2379 return -EINVAL;
2380 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2381 break;
Deepa Dinamani7f1bc6e2019-02-02 07:34:46 -08002382 case SO_TIMESTAMPING_OLD:
Soheil Hassas Yeganeh3dd17e62016-04-02 23:08:09 -04002383 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2384 return -EINVAL;
2385
2386 tsflags = *(u32 *)CMSG_DATA(cmsg);
2387 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2388 return -EINVAL;
2389
2390 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2391 sockc->tsflags |= tsflags;
2392 break;
Richard Cochran80b14de2018-07-03 15:42:48 -07002393 case SCM_TXTIME:
2394 if (!sock_flag(sk, SOCK_TXTIME))
2395 return -EINVAL;
2396 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2397 return -EINVAL;
2398 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2399 break;
Soheil Hassas Yeganeh779f1ed2016-07-11 16:51:26 -04002400 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2401 case SCM_RIGHTS:
2402 case SCM_CREDENTIALS:
2403 break;
Willem de Bruijn39771b12016-04-02 23:08:06 -04002404 default:
2405 return -EINVAL;
2406 }
2407 return 0;
2408}
2409EXPORT_SYMBOL(__sock_cmsg_send);
2410
Edward Jeef28ea362015-10-08 14:56:48 -07002411int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2412 struct sockcm_cookie *sockc)
2413{
2414 struct cmsghdr *cmsg;
Willem de Bruijn39771b12016-04-02 23:08:06 -04002415 int ret;
Edward Jeef28ea362015-10-08 14:56:48 -07002416
2417 for_each_cmsghdr(cmsg, msg) {
2418 if (!CMSG_OK(msg, cmsg))
2419 return -EINVAL;
2420 if (cmsg->cmsg_level != SOL_SOCKET)
2421 continue;
Willem de Bruijn39771b12016-04-02 23:08:06 -04002422 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2423 if (ret)
2424 return ret;
Edward Jeef28ea362015-10-08 14:56:48 -07002425 }
2426 return 0;
2427}
2428EXPORT_SYMBOL(sock_cmsg_send);
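
/* Illustrative userspace sketch (not part of this file): building the
 * SCM_TXTIME control message parsed by __sock_cmsg_send() above. Assumes
 * the CMSG_* macros from <sys/socket.h>; txtime_ns is a hypothetical __u64
 * transmit time in nanoseconds.
 *
 *	char buf[CMSG_SPACE(sizeof(__u64))] = {};
 *	struct msghdr msg = { .msg_control = buf, .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_TXTIME;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(__u64));
 *	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));
 */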
2429
Eric Dumazet06044752017-06-07 13:29:12 -07002430static void sk_enter_memory_pressure(struct sock *sk)
2431{
2432 if (!sk->sk_prot->enter_memory_pressure)
2433 return;
2434
2435 sk->sk_prot->enter_memory_pressure(sk);
2436}
2437
2438static void sk_leave_memory_pressure(struct sock *sk)
2439{
2440 if (sk->sk_prot->leave_memory_pressure) {
2441 sk->sk_prot->leave_memory_pressure(sk);
2442 } else {
2443 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2444
Eric Dumazet503978a2019-10-09 12:55:53 -07002445 if (memory_pressure && READ_ONCE(*memory_pressure))
2446 WRITE_ONCE(*memory_pressure, 0);
Eric Dumazet06044752017-06-07 13:29:12 -07002447 }
2448}
2449
Eric Dumazet5640f762012-09-23 23:04:42 +00002450#define SKB_FRAG_PAGE_ORDER get_order(32768)
Eric Dumazetce27ec62019-06-14 16:22:21 -07002451DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
Eric Dumazet5640f762012-09-23 23:04:42 +00002452
Eric Dumazet400dfd32013-10-17 16:27:07 -07002453/**
2454 * skb_page_frag_refill - check that a page_frag contains enough room
2455 * @sz: minimum size of the fragment we want to get
2456 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07002457 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07002458 *
2459 * Note: While this allocator tries to use high order pages, there is
2460 * no guarantee that allocations succeed. Therefore, @sz MUST be
 2461 * less than or equal to PAGE_SIZE.
2462 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07002463bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00002464{
Eric Dumazet5640f762012-09-23 23:04:42 +00002465 if (pfrag->page) {
Joonsoo Kimfe896d12016-03-17 14:19:26 -07002466 if (page_ref_count(pfrag->page) == 1) {
Eric Dumazet5640f762012-09-23 23:04:42 +00002467 pfrag->offset = 0;
2468 return true;
2469 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07002470 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00002471 return true;
2472 put_page(pfrag->page);
2473 }
2474
Eric Dumazetd9b29382014-08-27 20:49:34 -07002475 pfrag->offset = 0;
Eric Dumazetce27ec62019-06-14 16:22:21 -07002476 if (SKB_FRAG_PAGE_ORDER &&
2477 !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
Mel Gormand0164ad2015-11-06 16:28:21 -08002478 /* Avoid direct reclaim but allow kswapd to wake */
2479 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2480 __GFP_COMP | __GFP_NOWARN |
2481 __GFP_NORETRY,
Eric Dumazetd9b29382014-08-27 20:49:34 -07002482 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00002483 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07002484 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00002485 return true;
2486 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07002487 }
2488 pfrag->page = alloc_page(gfp);
2489 if (likely(pfrag->page)) {
2490 pfrag->size = PAGE_SIZE;
2491 return true;
2492 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07002493 return false;
2494}
2495EXPORT_SYMBOL(skb_page_frag_refill);
2496
2497bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2498{
2499 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2500 return true;
2501
Eric Dumazet5640f762012-09-23 23:04:42 +00002502 sk_enter_memory_pressure(sk);
2503 sk_stream_moderate_sndbuf(sk);
2504 return false;
2505}
2506EXPORT_SYMBOL(sk_page_frag_refill);
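
/* Usage sketch, simplified from typical sendmsg paths: refill the per-socket
 * fragment, copy in up to the remaining room, then advance the offset:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, len, pfrag->size - pfrag->offset);
 *	... copy into page_address(pfrag->page) + pfrag->offset ...
 *	pfrag->offset += copy;
 */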
2507
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00002509 __releases(&sk->sk_lock.slock)
2510 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511{
2512 DEFINE_WAIT(wait);
2513
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002514 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2516 TASK_UNINTERRUPTIBLE);
2517 spin_unlock_bh(&sk->sk_lock.slock);
2518 schedule();
2519 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002520 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 break;
2522 }
2523 finish_wait(&sk->sk_lock.wq, &wait);
2524}
2525
Eric Dumazet8873c062018-10-01 23:24:26 -07002526void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00002527 __releases(&sk->sk_lock.slock)
2528 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529{
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002530 struct sk_buff *skb, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002532 while ((skb = sk->sk_backlog.head) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002534
2535 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
2537 do {
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002538 next = skb->next;
Eric Dumazete4cbb022012-04-30 16:07:09 +00002539 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00002540 WARN_ON_ONCE(skb_dst_is_noref(skb));
David S. Millera8305bf2018-07-29 20:42:53 -07002541 skb_mark_not_on_list(skb);
Peter Zijlstrac57943a2008-10-07 14:18:42 -07002542 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002544 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545
2546 skb = next;
2547 } while (skb != NULL);
2548
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002549 spin_lock_bh(&sk->sk_lock.slock);
2550 }
Zhu Yi8eae9392010-03-04 18:01:40 +00002551
2552 /*
2553	 * Doing the zeroing here guarantees we cannot loop forever
2554 * while a wild producer attempts to flood us.
2555 */
2556 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557}
2558
Eric Dumazetd41a69f2016-04-29 14:16:53 -07002559void __sk_flush_backlog(struct sock *sk)
2560{
2561 spin_lock_bh(&sk->sk_lock.slock);
2562 __release_sock(sk);
2563 spin_unlock_bh(&sk->sk_lock.slock);
2564}
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566/**
2567 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07002568 * @sk: sock to wait on
2569 * @timeo: for how long
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002570 * @skb: last skb seen on sk_receive_queue
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 *
2572 * Now socket state including sk->sk_err is changed only under lock,
2573 * hence we may omit checks after joining the wait queue.
2574 * We check the receive queue before schedule() only as an optimization;
2575 * it is very likely that release_sock() added new data.
2576 */
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002577int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578{
WANG Congd9dc8b02016-11-11 10:20:50 -08002579 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581
WANG Congd9dc8b02016-11-11 10:20:50 -08002582 add_wait_queue(sk_sleep(sk), &wait);
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002583 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
WANG Congd9dc8b02016-11-11 10:20:50 -08002584 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002585 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
WANG Congd9dc8b02016-11-11 10:20:50 -08002586 remove_wait_queue(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 return rc;
2588}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589EXPORT_SYMBOL(sk_wait_data);
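
/*
 * Sketch of the receive loop this helper supports (assumption: modeled
 * on stream recvmsg implementations; enough_data() is a hypothetical
 * predicate and error handling is omitted):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	const struct sk_buff *tail;
 *
 *	do {
 *		tail = skb_peek_tail(&sk->sk_receive_queue);
 *		if (enough_data(sk))
 *			break;
 *		sk_wait_data(sk, &timeo, tail);
 *	} while (timeo && !signal_pending(current));
 *
 * Passing the last observed tail lets sk_wait_data() return as soon as
 * anything new is queued behind it.
 */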
2590
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002591/**
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002592 * __sk_mem_raise_allocated - increase memory_allocated
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002593 * @sk: socket
2594 * @size: memory size to allocate
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002595 * @amt: pages to allocate
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002596 * @kind: allocation type
2597 *
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002598 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002599 */
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002600int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002601{
2602 struct proto *prot = sk->sk_prot;
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002603 long allocated = sk_memory_allocated_add(sk, amt);
Yafang Shaod6f19932018-07-01 23:31:30 +08002604 bool charged = true;
Johannes Weinere8056052016-01-14 15:21:14 -08002605
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002606 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
Yafang Shaod6f19932018-07-01 23:31:30 +08002607 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
Johannes Weinere8056052016-01-14 15:21:14 -08002608 goto suppress_allocation;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002609
2610 /* Under limit. */
Johannes Weinere8056052016-01-14 15:21:14 -08002611 if (allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00002612 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002613 return 1;
2614 }
2615
Johannes Weinere8056052016-01-14 15:21:14 -08002616 /* Under pressure. */
2617 if (allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00002618 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002619
Johannes Weinere8056052016-01-14 15:21:14 -08002620 /* Over hard limit. */
2621 if (allocated > sk_prot_mem_limits(sk, 2))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002622 goto suppress_allocation;
2623
2624 /* guarantee minimum buffer size under pressure */
2625 if (kind == SK_MEM_RECV) {
Eric Dumazeta3dcaf12017-11-07 00:29:27 -08002626 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002627 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002628
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002629 } else { /* SK_MEM_SEND */
Eric Dumazeta3dcaf12017-11-07 00:29:27 -08002630 int wmem0 = sk_get_wmem0(sk, prot);
2631
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002632 if (sk->sk_type == SOCK_STREAM) {
Eric Dumazeta3dcaf12017-11-07 00:29:27 -08002633 if (sk->sk_wmem_queued < wmem0)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002634 return 1;
Eric Dumazeta3dcaf12017-11-07 00:29:27 -08002635 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002636 return 1;
Eric Dumazeta3dcaf12017-11-07 00:29:27 -08002637 }
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002638 }
2639
Glauber Costa180d8cd2011-12-11 21:47:02 +00002640 if (sk_has_memory_pressure(sk)) {
Eric Dumazet5bf325a2019-02-12 12:26:27 -08002641 u64 alloc;
Eric Dumazet17483762008-11-25 21:16:35 -08002642
Glauber Costa180d8cd2011-12-11 21:47:02 +00002643 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002644 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002645 alloc = sk_sockets_allocated_read_positive(sk);
2646 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002647 sk_mem_pages(sk->sk_wmem_queued +
2648 atomic_read(&sk->sk_rmem_alloc) +
2649 sk->sk_forward_alloc))
2650 return 1;
2651 }
2652
2653suppress_allocation:
2654
2655 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2656 sk_stream_moderate_sndbuf(sk);
2657
2658 /* Fail only if socket is _under_ its sndbuf.
2659	 * In this case we cannot block, so we have to fail.
2660 */
2661 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2662 return 1;
2663 }
2664
Yafang Shaod6f19932018-07-01 23:31:30 +08002665 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2666 trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
Satoru Moriya3847ce32011-06-17 12:00:03 +00002667
Glauber Costa0e90b312012-01-20 04:57:16 +00002668 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002669
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002670 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2671 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
Johannes Weinere8056052016-01-14 15:21:14 -08002672
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002673 return 0;
2674}
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002675EXPORT_SYMBOL(__sk_mem_raise_allocated);
2676
2677/**
2678 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2679 * @sk: socket
2680 * @size: memory size to allocate
2681 * @kind: allocation type
2682 *
2683 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2684 * rmem allocation. This function assumes that protocols which have
2685 * memory_pressure use sk_wmem_queued as write buffer accounting.
2686 */
2687int __sk_mem_schedule(struct sock *sk, int size, int kind)
2688{
2689 int ret, amt = sk_mem_pages(size);
2690
2691 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2692 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2693 if (!ret)
2694 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2695 return ret;
2696}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002697EXPORT_SYMBOL(__sk_mem_schedule);
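
/*
 * Worked example, assuming SK_MEM_QUANTUM == PAGE_SIZE == 4096: charging
 * a 6000-byte skb computes sk_mem_pages(6000) == 2, so memory_allocated
 * grows by two pages and sk_forward_alloc by 8192 bytes.  Once the
 * caller consumes the skb via sk_mem_charge(), 8192 - 6000 = 2192 bytes
 * of forward allocation remain to absorb a later small allocation
 * without touching the global counters again.
 */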
2698
2699/**
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002700 * __sk_mem_reduce_allocated - reclaim memory_allocated
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002701 * @sk: socket
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002702 * @amount: number of quanta
2703 *
2704 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002705 */
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002706void __sk_mem_reduce_allocated(struct sock *sk, int amount)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002707{
Eric Dumazet1a24e042015-05-15 12:39:25 -07002708 sk_memory_allocated_sub(sk, amount);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002709
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002710 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2711 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
Johannes Weinere8056052016-01-14 15:21:14 -08002712
Glauber Costa180d8cd2011-12-11 21:47:02 +00002713 if (sk_under_memory_pressure(sk) &&
2714 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2715 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002716}
Paolo Abenif8c3bf02016-10-21 13:55:45 +02002717EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2718
2719/**
2720 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2721 * @sk: socket
2722 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2723 */
2724void __sk_mem_reclaim(struct sock *sk, int amount)
2725{
2726 amount >>= SK_MEM_QUANTUM_SHIFT;
2727 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2728 __sk_mem_reduce_allocated(sk, amount);
2729}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002730EXPORT_SYMBOL(__sk_mem_reclaim);
2731
samanthakumar627d2d62016-04-05 12:41:16 -04002732int sk_set_peek_off(struct sock *sk, int val)
2733{
samanthakumar627d2d62016-04-05 12:41:16 -04002734 sk->sk_peek_off = val;
2735 return 0;
2736}
2737EXPORT_SYMBOL_GPL(sk_set_peek_off);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739/*
2740 * Set of default routines for initialising struct proto_ops when
2741 * the protocol does not support a particular function. In certain
2742 * cases where it makes no sense for a protocol to have a "do nothing"
2743 * function, some default processing is provided.
2744 */
2745
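/*
 * A protocol wires these stubs into its proto_ops for whatever it does
 * not implement.  A sketch with hypothetical names:
 *
 *	static const struct proto_ops example_ops = {
 *		.family	    = PF_EXAMPLE,
 *		.owner	    = THIS_MODULE,
 *		.socketpair = sock_no_socketpair,
 *		.mmap	    = sock_no_mmap,
 *		// real handlers fill in the operations that are supported
 *	};
 */
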
2746int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2747{
2748 return -EOPNOTSUPP;
2749}
Eric Dumazet2a915252009-05-27 11:30:05 +00002750EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002752int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 int len, int flags)
2754{
2755 return -EOPNOTSUPP;
2756}
Eric Dumazet2a915252009-05-27 11:30:05 +00002757EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
2759int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2760{
2761 return -EOPNOTSUPP;
2762}
Eric Dumazet2a915252009-05-27 11:30:05 +00002763EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
David Howellscdfbabf2017-03-09 08:09:05 +00002765int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2766 bool kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767{
2768 return -EOPNOTSUPP;
2769}
Eric Dumazet2a915252009-05-27 11:30:05 +00002770EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002772int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Denys Vlasenko9b2c45d2018-02-12 20:00:20 +01002773 int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774{
2775 return -EOPNOTSUPP;
2776}
Eric Dumazet2a915252009-05-27 11:30:05 +00002777EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2780{
2781 return -EOPNOTSUPP;
2782}
Eric Dumazet2a915252009-05-27 11:30:05 +00002783EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784
2785int sock_no_listen(struct socket *sock, int backlog)
2786{
2787 return -EOPNOTSUPP;
2788}
Eric Dumazet2a915252009-05-27 11:30:05 +00002789EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790
2791int sock_no_shutdown(struct socket *sock, int how)
2792{
2793 return -EOPNOTSUPP;
2794}
Eric Dumazet2a915252009-05-27 11:30:05 +00002795EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796
Ying Xue1b784142015-03-02 15:37:48 +08002797int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798{
2799 return -EOPNOTSUPP;
2800}
Eric Dumazet2a915252009-05-27 11:30:05 +00002801EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
Tom Herbert306b13e2017-07-28 16:22:41 -07002803int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2804{
2805 return -EOPNOTSUPP;
2806}
2807EXPORT_SYMBOL(sock_no_sendmsg_locked);
2808
Ying Xue1b784142015-03-02 15:37:48 +08002809int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2810 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811{
2812 return -EOPNOTSUPP;
2813}
Eric Dumazet2a915252009-05-27 11:30:05 +00002814EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815
2816int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2817{
2818 /* Mirror missing mmap method error code */
2819 return -ENODEV;
2820}
Eric Dumazet2a915252009-05-27 11:30:05 +00002821EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
Kees Cookd9539752020-06-09 16:11:29 -07002823/*
2824 * When a file is received (via SCM_RIGHTS, etc), we must bump the
2825 * various sock-based usage counts.
2826 */
2827void __receive_sock(struct file *file)
2828{
2829 struct socket *sock;
2830 int error;
2831
2832 /*
2833 * The resulting value of "error" is ignored here since we only
2834 * need to take action when the file is a socket and testing
2835 * "sock" for NULL is sufficient.
2836 */
2837 sock = sock_from_file(file, &error);
2838 if (sock) {
2839 sock_update_netprioidx(&sock->sk->sk_cgrp_data);
2840 sock_update_classid(&sock->sk->sk_cgrp_data);
2841 }
2842}
2843
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2845{
2846 ssize_t res;
2847 struct msghdr msg = {.msg_flags = flags};
2848 struct kvec iov;
2849 char *kaddr = kmap(page);
2850 iov.iov_base = kaddr + offset;
2851 iov.iov_len = size;
2852 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2853 kunmap(page);
2854 return res;
2855}
Eric Dumazet2a915252009-05-27 11:30:05 +00002856EXPORT_SYMBOL(sock_no_sendpage);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
Tom Herbert306b13e2017-07-28 16:22:41 -07002858ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2859 int offset, size_t size, int flags)
2860{
2861 ssize_t res;
2862 struct msghdr msg = {.msg_flags = flags};
2863 struct kvec iov;
2864 char *kaddr = kmap(page);
2865
2866 iov.iov_base = kaddr + offset;
2867 iov.iov_len = size;
2868 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2869 kunmap(page);
2870 return res;
2871}
2872EXPORT_SYMBOL(sock_no_sendpage_locked);
2873
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874/*
2875 * Default Socket Callbacks
2876 */
2877
2878static void sock_def_wakeup(struct sock *sk)
2879{
Eric Dumazet43815482010-04-29 11:01:49 +00002880 struct socket_wq *wq;
2881
2882 rcu_read_lock();
2883 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002884 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002885 wake_up_interruptible_all(&wq->wait);
2886 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887}
2888
2889static void sock_def_error_report(struct sock *sk)
2890{
Eric Dumazet43815482010-04-29 11:01:49 +00002891 struct socket_wq *wq;
2892
2893 rcu_read_lock();
2894 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002895 if (skwq_has_sleeper(wq))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002896 wake_up_interruptible_poll(&wq->wait, EPOLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002897 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002898 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899}
2900
Björn Töpel43a825a2020-01-20 10:29:17 +01002901void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902{
Eric Dumazet43815482010-04-29 11:01:49 +00002903 struct socket_wq *wq;
2904
2905 rcu_read_lock();
2906 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002907 if (skwq_has_sleeper(wq))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002908 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
2909 EPOLLRDNORM | EPOLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002910 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002911 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912}
2913
2914static void sock_def_write_space(struct sock *sk)
2915{
Eric Dumazet43815482010-04-29 11:01:49 +00002916 struct socket_wq *wq;
2917
2918 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
2920 /* Do not wake up a writer until he can make "significant"
2921 * progress. --DaveM
2922 */
Eric Dumazete292f052019-10-10 20:17:45 -07002923 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
Eric Dumazet43815482010-04-29 11:01:49 +00002924 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002925 if (skwq_has_sleeper(wq))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002926 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2927 EPOLLWRNORM | EPOLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928
2929 /* Should agree with poll, otherwise some programs break */
2930 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002931 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 }
2933
Eric Dumazet43815482010-04-29 11:01:49 +00002934 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935}
2936
2937static void sock_def_destruct(struct sock *sk)
2938{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939}
2940
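/*
 * Kernel users commonly replace these defaults to get event-driven
 * notifications.  A sketch of the usual pattern (assumption: based on
 * in-tree upper layers such as RPC transports; names are illustrative):
 *
 *	static void example_data_ready(struct sock *sk)
 *	{
 *		struct example_conn *conn = sk->sk_user_data;
 *
 *		queue_work(example_wq, &conn->rx_work);
 *	}
 *
 *	write_lock_bh(&sk->sk_callback_lock);
 *	sk->sk_user_data  = conn;
 *	conn->saved_ready = sk->sk_data_ready;
 *	sk->sk_data_ready = example_data_ready;
 *	write_unlock_bh(&sk->sk_callback_lock);
 *
 * sk_callback_lock, initialised in sock_init_data() below, serialises
 * such swaps against the callbacks themselves.
 */
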
2941void sk_send_sigurg(struct sock *sk)
2942{
2943 if (sk->sk_socket && sk->sk_socket->file)
2944 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002945 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946}
Eric Dumazet2a915252009-05-27 11:30:05 +00002947EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
2949void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2950 unsigned long expires)
2951{
2952 if (!mod_timer(timer, expires))
2953 sock_hold(sk);
2954}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955EXPORT_SYMBOL(sk_reset_timer);
2956
2957void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2958{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002959 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 __sock_put(sk);
2961}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962EXPORT_SYMBOL(sk_stop_timer);
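
/*
 * These two keep the sock pinned while a timer is armed: sk_reset_timer()
 * takes a reference when it actually (re)arms the timer, and either the
 * handler or sk_stop_timer() drops it.  A sketch with hypothetical names:
 *
 *	static void example_timer(struct timer_list *t)
 *	{
 *		struct sock *sk = from_timer(sk, t, sk_timer);
 *
 *		// ... timer work ...
 *		sock_put(sk);		// release the arming reference
 *	}
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 */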
2963
2964void sock_init_data(struct socket *sock, struct sock *sk)
2965{
Paolo Abeni581319c2017-03-09 13:54:08 +01002966 sk_init_common(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 sk->sk_send_head = NULL;
2968
Kees Cook99767f22017-10-16 17:29:36 -07002969 timer_setup(&sk->sk_timer, NULL, 0);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 sk->sk_allocation = GFP_KERNEL;
2972 sk->sk_rcvbuf = sysctl_rmem_default;
2973 sk->sk_sndbuf = sysctl_wmem_default;
2974 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002975 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976
2977 sock_set_flag(sk, SOCK_ZAPPED);
2978
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002979 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 sk->sk_type = sock->type;
Al Viro333f7902019-07-05 20:14:16 +01002981 RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 sock->sk = sk;
Lorenzo Colitti86741ec2016-11-04 02:23:41 +09002983 sk->sk_uid = SOCK_INODE(sock)->i_uid;
2984 } else {
Li RongQingc2f26e82019-02-22 17:08:22 +08002985 RCU_INIT_POINTER(sk->sk_wq, NULL);
Lorenzo Colitti86741ec2016-11-04 02:23:41 +09002986 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
2987 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 rwlock_init(&sk->sk_callback_lock);
David Howellscdfbabf2017-03-09 08:09:05 +00002990 if (sk->sk_kern_sock)
2991 lockdep_set_class_and_name(
2992 &sk->sk_callback_lock,
2993 af_kern_callback_keys + sk->sk_family,
2994 af_family_kern_clock_key_strings[sk->sk_family]);
2995 else
2996 lockdep_set_class_and_name(
2997 &sk->sk_callback_lock,
Peter Zijlstra443aef0e2007-07-19 01:49:00 -07002998 af_callback_keys + sk->sk_family,
2999 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000
3001 sk->sk_state_change = sock_def_wakeup;
3002 sk->sk_data_ready = sock_def_readable;
3003 sk->sk_write_space = sock_def_write_space;
3004 sk->sk_error_report = sock_def_error_report;
3005 sk->sk_destruct = sock_def_destruct;
3006
Eric Dumazet5640f762012-09-23 23:04:42 +00003007 sk->sk_frag.page = NULL;
3008 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00003009 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
Eric W. Biederman109f6e32010-06-13 03:30:14 +00003011 sk->sk_peer_pid = NULL;
3012 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 sk->sk_write_pending = 0;
3014 sk->sk_rcvlowat = 1;
3015 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
3016 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
3017
Paolo Abeni6c7c98b2017-03-30 14:03:06 +02003018 sk->sk_stamp = SK_DEFAULT_STAMP;
Deepa Dinamani3a0ed3e92018-12-27 18:55:09 -08003019#if BITS_PER_LONG==32
3020 seqlock_init(&sk->sk_stamp_seq);
3021#endif
Willem de Bruijn52267792017-08-03 16:29:39 -04003022 atomic_set(&sk->sk_zckey, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023
Cong Wange0d10952013-08-01 11:10:25 +08003024#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03003025 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03003026 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03003027#endif
3028
Eric Dumazet76a9ebe2018-10-15 09:37:53 -07003029 sk->sk_max_pacing_rate = ~0UL;
3030 sk->sk_pacing_rate = ~0UL;
Eric Dumazet7c68fa2b2019-12-16 18:51:03 -08003031 WRITE_ONCE(sk->sk_pacing_shift, 10);
Eric Dumazet70da2682015-10-08 19:33:21 -07003032 sk->sk_incoming_cpu = -1;
Amritha Nambiarc6345ce2018-06-29 21:26:57 -07003033
3034 sk_rx_queue_clear(sk);
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00003035 /*
3036 * Before updating sk_refcnt, we must commit prior changes to memory
Mauro Carvalho Chehab2cdb54c2020-04-21 19:04:05 +02003037	 * (see Documentation/RCU/rculist_nulls.rst for details)
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00003038 */
3039 smp_wmb();
Reshetova, Elena41c6d652017-06-30 13:08:01 +03003040 refcount_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08003041 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042}
Eric Dumazet2a915252009-05-27 11:30:05 +00003043EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044
Harvey Harrisonb5606c22008-02-13 15:03:16 -08003045void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046{
3047 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07003048 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02003049 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02003051 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07003052 spin_unlock(&sk->sk_lock.slock);
3053 /*
3054 * The sk_lock has mutex_lock() semantics here:
3055 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08003056 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07003057 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08003059EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060
Harvey Harrisonb5606c22008-02-13 15:03:16 -08003061void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07003063 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 if (sk->sk_backlog.tail)
3065 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00003066
Eric Dumazetc3f9b012014-03-10 09:50:11 -07003067 /* Warning : release_cb() might need to release sk ownership,
3068 * ie call sock_release_ownership(sk) before us.
3069 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00003070 if (sk->sk_prot->release_cb)
3071 sk->sk_prot->release_cb(sk);
3072
Eric Dumazetc3f9b012014-03-10 09:50:11 -07003073 sock_release_ownership(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07003074 if (waitqueue_active(&sk->sk_lock.wq))
3075 wake_up(&sk->sk_lock.wq);
3076 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077}
3078EXPORT_SYMBOL(release_sock);
3079
Eric Dumazet8a74ad62010-05-26 19:20:18 +00003080/**
3081 * lock_sock_fast - fast version of lock_sock
3082 * @sk: socket
3083 *
3084 * This version should be used for very small sections, where the process won't block.
Mauro Carvalho Chehabd6519832017-05-12 09:35:46 -03003085 * Returns false if the fast path is taken:
3086 *
Eric Dumazet8a74ad62010-05-26 19:20:18 +00003087 * sk_lock.slock locked, owned = 0, BH disabled
Mauro Carvalho Chehabd6519832017-05-12 09:35:46 -03003088 *
3089 * Returns true if the slow path is taken:
3090 *
Eric Dumazet8a74ad62010-05-26 19:20:18 +00003091 * sk_lock.slock unlocked, owned = 1, BH enabled
3092 */
3093bool lock_sock_fast(struct sock *sk)
3094{
3095 might_sleep();
3096 spin_lock_bh(&sk->sk_lock.slock);
3097
3098 if (!sk->sk_lock.owned)
3099 /*
3100		 * Note: we must keep BH disabled here
3101 */
3102 return false;
3103
3104 __lock_sock(sk);
3105 sk->sk_lock.owned = 1;
3106 spin_unlock(&sk->sk_lock.slock);
3107 /*
3108 * The sk_lock has mutex_lock() semantics here:
3109 */
3110 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
3111 local_bh_enable();
3112 return true;
3113}
3114EXPORT_SYMBOL(lock_sock_fast);
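
/*
 * Typical usage, paired with unlock_sock_fast() from include/net/sock.h,
 * which releases whichever lock form was actually taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	// short, non-blocking critical section
 *	unlock_sock_fast(sk, slow);
 */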
3115
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003116int sock_gettstamp(struct socket *sock, void __user *userstamp,
3117 bool timeval, bool time32)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003118{
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003119 struct sock *sk = sock->sk;
3120 struct timespec64 ts;
Yafang Shao9dae3492018-08-06 11:57:02 +08003121
3122 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003123 ts = ktime_to_timespec64(sock_read_timestamp(sk));
Eric Dumazetae40eb12007-03-18 17:33:16 -07003124 if (ts.tv_sec == -1)
3125 return -ENOENT;
3126 if (ts.tv_sec == 0) {
Deepa Dinamani3a0ed3e92018-12-27 18:55:09 -08003127 ktime_t kt = ktime_get_real();
YueHaibingf95f96a2019-10-25 17:18:36 +08003128 sock_write_timestamp(sk, kt);
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003129 ts = ktime_to_timespec64(kt);
Eric Dumazetae40eb12007-03-18 17:33:16 -07003130 }
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003131
3132 if (timeval)
3133 ts.tv_nsec /= 1000;
3134
3135#ifdef CONFIG_COMPAT_32BIT_TIME
3136 if (time32)
3137 return put_old_timespec32(&ts, userstamp);
3138#endif
3139#ifdef CONFIG_SPARC64
3140 /* beware of padding in sparc64 timeval */
3141 if (timeval && !in_compat_syscall()) {
3142 struct __kernel_old_timeval __user tv = {
Stephen Rothwellc98f4822019-04-23 17:25:24 +10003143 .tv_sec = ts.tv_sec,
3144 .tv_usec = ts.tv_nsec,
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003145 };
Stephen Rothwellc98f4822019-04-23 17:25:24 +10003146 if (copy_to_user(userstamp, &tv, sizeof(tv)))
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003147 return -EFAULT;
3148 return 0;
3149 }
3150#endif
3151 return put_timespec64(&ts, userstamp);
Eric Dumazetae40eb12007-03-18 17:33:16 -07003152}
Arnd Bergmannc7cbdbf2019-04-17 22:51:48 +02003153EXPORT_SYMBOL(sock_gettstamp);
Eric Dumazetae40eb12007-03-18 17:33:16 -07003154
Alexey Dobriyan193d3572019-10-03 23:56:37 +03003155void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003156{
Patrick Ohly20d49472009-02-12 05:03:38 +00003157 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00003158 unsigned long previous_flags = sk->sk_flags;
3159
Patrick Ohly20d49472009-02-12 05:03:38 +00003160 sock_set_flag(sk, flag);
3161 /*
3162 * we just set one of the two flags which require net
3163 * time stamping, but time stamping might have been on
3164 * already because of the other one
3165 */
Hannes Frederic Sowa080a2702015-10-26 13:51:37 +01003166 if (sock_needs_netstamp(sk) &&
3167 !(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00003168 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 }
3170}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171
Richard Cochrancb820f82013-07-19 19:40:09 +02003172int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3173 int level, int type)
3174{
3175 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04003176 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02003177 int copied, err;
3178
3179 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04003180 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02003181 if (skb == NULL)
3182 goto out;
3183
3184 copied = skb->len;
3185 if (copied > len) {
3186 msg->msg_flags |= MSG_TRUNC;
3187 copied = len;
3188 }
David S. Miller51f3d022014-11-05 16:46:40 -05003189 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02003190 if (err)
3191 goto out_free_skb;
3192
3193 sock_recv_timestamp(msg, sk, skb);
3194
3195 serr = SKB_EXT_ERR(skb);
3196 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3197
3198 msg->msg_flags |= MSG_ERRQUEUE;
3199 err = copied;
3200
Richard Cochrancb820f82013-07-19 19:40:09 +02003201out_free_skb:
3202 kfree_skb(skb);
3203out:
3204 return err;
3205}
3206EXPORT_SYMBOL(sock_recv_errqueue);
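
/*
 * Userspace drains this queue with MSG_ERRQUEUE.  A minimal sketch
 * (illustrative; error handling omitted):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sock_extended_err))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *		// cm->cmsg_level/cmsg_type identify the origin layer;
 *		// CMSG_DATA(cm) holds the struct sock_extended_err.
 *	}
 */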
3207
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208/*
3209 *	Get a socket option on a socket.
3210 *
3211 * FIX: POSIX 1003.1g is very ambiguous here. It states that
3212 * asynchronous errors should be reported by getsockopt. We assume
3213 *	this means if you specify SO_ERROR (otherwise what's the point of it?).
3214 */
3215int sock_common_getsockopt(struct socket *sock, int level, int optname,
3216 char __user *optval, int __user *optlen)
3217{
3218 struct sock *sk = sock->sk;
3219
3220 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3221}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222EXPORT_SYMBOL(sock_common_getsockopt);
3223
Ying Xue1b784142015-03-02 15:37:48 +08003224int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3225 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226{
3227 struct sock *sk = sock->sk;
3228 int addr_len = 0;
3229 int err;
3230
Ying Xue1b784142015-03-02 15:37:48 +08003231 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 flags & ~MSG_DONTWAIT, &addr_len);
3233 if (err >= 0)
3234 msg->msg_namelen = addr_len;
3235 return err;
3236}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237EXPORT_SYMBOL(sock_common_recvmsg);
3238
3239/*
3240 * Set socket options on an inet socket.
3241 */
3242int sock_common_setsockopt(struct socket *sock, int level, int optname,
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02003243 sockptr_t optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244{
3245 struct sock *sk = sock->sk;
3246
3247 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3248}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249EXPORT_SYMBOL(sock_common_setsockopt);
3250
3251void sk_common_release(struct sock *sk)
3252{
3253 if (sk->sk_prot->destroy)
3254 sk->sk_prot->destroy(sk);
3255
3256 /*
Miaohe Lin645f0892020-08-27 07:27:49 -04003257 * Observation: when sk_common_release is called, processes have
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258	 * no access to the socket. But the network stack still does.
3259 * Step one, detach it from networking:
3260 *
3261 * A. Remove from hash tables.
3262 */
3263
3264 sk->sk_prot->unhash(sk);
3265
3266 /*
3267	 * At this point the socket cannot receive new packets, but it is possible
3268	 * that some packets are in flight because some CPU runs the receiver and
3269	 * did a hash table lookup before we unhashed the socket. They will reach
3270	 * the receive queue and will be purged by the socket destructor.
3271	 *
3272	 * Also we still have packets pending in the receive queue and, probably,
3273	 * our own packets waiting in device queues. sock_destroy will drain the
3274	 * receive queue, but transmitted packets will delay socket destruction
3275	 * until the last reference is released.
3276 */
3277
3278 sock_orphan(sk);
3279
3280 xfrm_sk_free_policy(sk);
3281
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07003282 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00003283
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284 sock_put(sk);
3285}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286EXPORT_SYMBOL(sk_common_release);
3287
Josh Hunta2d133b2017-03-20 15:22:03 -04003288void sk_get_meminfo(const struct sock *sk, u32 *mem)
3289{
3290 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3291
3292 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
Eric Dumazetebb3b782019-10-10 20:17:44 -07003293 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
Josh Hunta2d133b2017-03-20 15:22:03 -04003294 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
Eric Dumazete292f052019-10-10 20:17:45 -07003295 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
Josh Hunta2d133b2017-03-20 15:22:03 -04003296 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
Eric Dumazetab4e8462019-10-10 20:17:46 -07003297 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
Josh Hunta2d133b2017-03-20 15:22:03 -04003298 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
Eric Dumazet70c26552019-10-09 15:41:03 -07003299 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
Josh Hunta2d133b2017-03-20 15:22:03 -04003300 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3301}
3302
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003303#ifdef CONFIG_PROC_FS
3304#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07003305struct prot_inuse {
3306 int val[PROTO_INUSE_NR];
3307};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003308
3309static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003310
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003311void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3312{
Tonghao Zhang08fc7f82017-12-14 05:51:57 -08003313 __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003314}
3315EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3316
3317int sock_prot_inuse_get(struct net *net, struct proto *prot)
3318{
3319 int cpu, idx = prot->inuse_idx;
3320 int res = 0;
3321
3322 for_each_possible_cpu(cpu)
Tonghao Zhang08fc7f82017-12-14 05:51:57 -08003323 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003324
3325 return res >= 0 ? res : 0;
3326}
3327EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3328
Tonghao Zhang648845a2017-12-14 05:51:58 -08003329static void sock_inuse_add(struct net *net, int val)
3330{
3331 this_cpu_add(*net->core.sock_inuse, val);
3332}
3333
3334int sock_inuse_get(struct net *net)
3335{
3336 int cpu, res = 0;
3337
3338 for_each_possible_cpu(cpu)
3339 res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3340
3341 return res;
3342}
3343
3344EXPORT_SYMBOL_GPL(sock_inuse_get);
3345
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003346static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003347{
Tonghao Zhang08fc7f82017-12-14 05:51:57 -08003348 net->core.prot_inuse = alloc_percpu(struct prot_inuse);
Tonghao Zhang648845a2017-12-14 05:51:58 -08003349 if (net->core.prot_inuse == NULL)
3350 return -ENOMEM;
3351
3352 net->core.sock_inuse = alloc_percpu(int);
3353 if (net->core.sock_inuse == NULL)
3354 goto out;
3355
3356 return 0;
3357
3358out:
3359 free_percpu(net->core.prot_inuse);
3360 return -ENOMEM;
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003361}
3362
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003363static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003364{
Tonghao Zhang08fc7f82017-12-14 05:51:57 -08003365 free_percpu(net->core.prot_inuse);
Tonghao Zhang648845a2017-12-14 05:51:58 -08003366 free_percpu(net->core.sock_inuse);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07003367}
3368
3369static struct pernet_operations net_inuse_ops = {
3370 .init = sock_inuse_init_net,
3371 .exit = sock_inuse_exit_net,
3372};
3373
3374static __init int net_inuse_init(void)
3375{
3376 if (register_pernet_subsys(&net_inuse_ops))
3377 panic("Cannot initialize net inuse counters");
3378
3379 return 0;
3380}
3381
3382core_initcall(net_inuse_init);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003383
zhanglinb45ce322019-08-23 09:14:11 +08003384static int assign_proto_idx(struct proto *prot)
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003385{
3386 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3387
3388 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00003389 pr_err("PROTO_INUSE_NR exhausted\n");
zhanglinb45ce322019-08-23 09:14:11 +08003390 return -ENOSPC;
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003391 }
3392
3393 set_bit(prot->inuse_idx, proto_inuse_idx);
zhanglinb45ce322019-08-23 09:14:11 +08003394 return 0;
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003395}
3396
3397static void release_proto_idx(struct proto *prot)
3398{
3399 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3400 clear_bit(prot->inuse_idx, proto_inuse_idx);
3401}
3402#else
zhanglinb45ce322019-08-23 09:14:11 +08003403static inline int assign_proto_idx(struct proto *prot)
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003404{
zhanglinb45ce322019-08-23 09:14:11 +08003405 return 0;
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003406}
3407
3408static inline void release_proto_idx(struct proto *prot)
3409{
3410}
Tonghao Zhang648845a2017-12-14 05:51:58 -08003411
3412static void sock_inuse_add(struct net *net, int val)
3413{
3414}
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003415#endif
3416
Miaohe Lin0f5907a2020-08-10 08:16:58 -04003417static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
3418{
3419 if (!twsk_prot)
3420 return;
3421 kfree(twsk_prot->twsk_slab_name);
3422 twsk_prot->twsk_slab_name = NULL;
3423 kmem_cache_destroy(twsk_prot->twsk_slab);
3424 twsk_prot->twsk_slab = NULL;
3425}
3426
Eric Dumazet0159dfd2015-03-12 16:44:07 -07003427static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3428{
3429 if (!rsk_prot)
3430 return;
3431 kfree(rsk_prot->slab_name);
3432 rsk_prot->slab_name = NULL;
Julia Lawalladf78ed2015-09-13 14:15:18 +02003433 kmem_cache_destroy(rsk_prot->slab);
3434 rsk_prot->slab = NULL;
Eric Dumazet0159dfd2015-03-12 16:44:07 -07003435}
3436
3437static int req_prot_init(const struct proto *prot)
3438{
3439 struct request_sock_ops *rsk_prot = prot->rsk_prot;
3440
3441 if (!rsk_prot)
3442 return 0;
3443
3444 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3445 prot->name);
3446 if (!rsk_prot->slab_name)
3447 return -ENOMEM;
3448
3449 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3450 rsk_prot->obj_size, 0,
Shakeel Butte699e2c2018-06-27 15:16:42 -07003451 SLAB_ACCOUNT | prot->slab_flags,
3452 NULL);
Eric Dumazet0159dfd2015-03-12 16:44:07 -07003453
3454 if (!rsk_prot->slab) {
3455 pr_crit("%s: Can't create request sock SLAB cache!\n",
3456 prot->name);
3457 return -ENOMEM;
3458 }
3459 return 0;
3460}
3461
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462int proto_register(struct proto *prot, int alloc_slab)
3463{
zhanglinb45ce322019-08-23 09:14:11 +08003464 int ret = -ENOBUFS;
3465
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 if (alloc_slab) {
David Windsor30c2c9f2017-06-10 22:50:42 -04003467 prot->slab = kmem_cache_create_usercopy(prot->name,
3468 prot->obj_size, 0,
Shakeel Butte699e2c2018-06-27 15:16:42 -07003469 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3470 prot->slab_flags,
Kees Cook289a48602017-08-24 16:59:38 -07003471 prot->useroffset, prot->usersize,
Eric Dumazet271b72c2008-10-29 02:11:14 -07003472 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473
3474 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00003475 pr_crit("%s: Can't create sock SLAB cache!\n",
3476 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07003477 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07003479
Eric Dumazet0159dfd2015-03-12 16:44:07 -07003480 if (req_prot_init(prot))
3481 goto out_free_request_sock_slab;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07003482
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08003483 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00003484 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07003485
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08003486 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07003487 goto out_free_request_sock_slab;
3488
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08003489 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08003490 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08003491 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08003492 0,
Shakeel Butte699e2c2018-06-27 15:16:42 -07003493 SLAB_ACCOUNT |
Eric Dumazet52db70d2015-04-10 06:07:18 -07003494 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09003495 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08003496 if (prot->twsk_prot->twsk_slab == NULL)
Miaohe Lin0f5907a2020-08-10 08:16:58 -04003497 goto out_free_timewait_sock_slab;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07003498 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 }
3500
Glauber Costa36b77a52011-12-16 00:51:59 +00003501 mutex_lock(&proto_list_mutex);
zhanglinb45ce322019-08-23 09:14:11 +08003502 ret = assign_proto_idx(prot);
3503 if (ret) {
3504 mutex_unlock(&proto_list_mutex);
Miaohe Lin0f5907a2020-08-10 08:16:58 -04003505 goto out_free_timewait_sock_slab;
zhanglinb45ce322019-08-23 09:14:11 +08003506 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 list_add(&prot->node, &proto_list);
Glauber Costa36b77a52011-12-16 00:51:59 +00003508 mutex_unlock(&proto_list_mutex);
zhanglinb45ce322019-08-23 09:14:11 +08003509 return ret;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08003510
Miaohe Lin0f5907a2020-08-10 08:16:58 -04003511out_free_timewait_sock_slab:
zhanglinb45ce322019-08-23 09:14:11 +08003512 if (alloc_slab && prot->twsk_prot)
Miaohe Lin0f5907a2020-08-10 08:16:58 -04003513 tw_prot_cleanup(prot->twsk_prot);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07003514out_free_request_sock_slab:
zhanglinb45ce322019-08-23 09:14:11 +08003515 if (alloc_slab) {
3516 req_prot_cleanup(prot->rsk_prot);
Eric Dumazet0159dfd2015-03-12 16:44:07 -07003517
zhanglinb45ce322019-08-23 09:14:11 +08003518 kmem_cache_destroy(prot->slab);
3519 prot->slab = NULL;
3520 }
Pavel Emelyanovb733c002007-11-07 02:23:38 -08003521out:
zhanglinb45ce322019-08-23 09:14:11 +08003522 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524EXPORT_SYMBOL(proto_register);
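
/*
 * Registration sketch for a hypothetical protocol (names are made up;
 * real users also register a proto_ops-based address family handler):
 *
 *	static struct proto example_proto = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	err = proto_register(&example_proto, 1);	// 1: create a slab
 *	...
 *	proto_unregister(&example_proto);
 */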
3525
3526void proto_unregister(struct proto *prot)
3527{
Glauber Costa36b77a52011-12-16 00:51:59 +00003528 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07003529 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07003530 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00003531 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532
Julia Lawalladf78ed2015-09-13 14:15:18 +02003533 kmem_cache_destroy(prot->slab);
3534 prot->slab = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
Eric Dumazet0159dfd2015-03-12 16:44:07 -07003536 req_prot_cleanup(prot->rsk_prot);
Miaohe Lin0f5907a2020-08-10 08:16:58 -04003537 tw_prot_cleanup(prot->twsk_prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539EXPORT_SYMBOL(proto_unregister);
3540
Xin Longbf2ae2e2018-03-10 18:57:50 +08003541int sock_load_diag_module(int family, int protocol)
3542{
3543 if (!protocol) {
3544 if (!sock_is_registered(family))
3545 return -ENOENT;
3546
3547 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3548 NETLINK_SOCK_DIAG, family);
3549 }
3550
3551#ifdef CONFIG_INET
3552 if (family == AF_INET &&
Andrei Vaginc34c1282018-11-04 22:37:15 -08003553 protocol != IPPROTO_RAW &&
Paolo Abeni3f935c72020-07-09 15:12:39 +02003554 protocol < MAX_INET_PROTOS &&
Xin Longbf2ae2e2018-03-10 18:57:50 +08003555 !rcu_access_pointer(inet_protos[protocol]))
3556 return -ENOENT;
3557#endif
3558
3559 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3560 NETLINK_SOCK_DIAG, family, protocol);
3561}
3562EXPORT_SYMBOL(sock_load_diag_module);
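
/*
 * Worked example: sock_load_diag_module(AF_INET, IPPROTO_TCP) requests
 * "net-pf-16-proto-4-type-2-6", i.e. PF_NETLINK (16), NETLINK_SOCK_DIAG
 * (4), then the queried family (2) and protocol (6).  Diag modules
 * advertise matching MODULE_ALIAS_NET_PF_PROTO_TYPE_PROTO() aliases so
 * modprobe can resolve the string.
 */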
3563
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00003566 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567{
Glauber Costa36b77a52011-12-16 00:51:59 +00003568 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07003569 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570}
3571
3572static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3573{
Pavel Emelianov60f04382007-07-09 13:15:14 -07003574 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575}
3576
3577static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00003578 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579{
Glauber Costa36b77a52011-12-16 00:51:59 +00003580 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581}
3582
3583static char proto_method_implemented(const void *method)
3584{
3585 return method == NULL ? 'n' : 'y';
3586}
Glauber Costa180d8cd2011-12-11 21:47:02 +00003587static long sock_prot_memory_allocated(struct proto *proto)
3588{
Jeffrin Josecb75a362012-04-25 19:17:29 +05303589 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00003590}
3591
Alexey Dobriyan7a512eb2019-10-04 00:44:40 +03003592static const char *sock_prot_memory_pressure(struct proto *proto)
Glauber Costa180d8cd2011-12-11 21:47:02 +00003593{
3594 return proto->memory_pressure != NULL ?
3595 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3596}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597
3598static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3599{
Glauber Costa180d8cd2011-12-11 21:47:02 +00003600
Eric Dumazet8d987e52010-11-09 23:24:26 +00003601 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3603 proto->name,
3604 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08003605 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00003606 sock_prot_memory_allocated(proto),
3607 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 proto->max_header,
3609 proto->slab == NULL ? "no" : "yes",
3610 module_name(proto->owner),
3611 proto_method_implemented(proto->close),
3612 proto_method_implemented(proto->connect),
3613 proto_method_implemented(proto->disconnect),
3614 proto_method_implemented(proto->accept),
3615 proto_method_implemented(proto->ioctl),
3616 proto_method_implemented(proto->init),
3617 proto_method_implemented(proto->destroy),
3618 proto_method_implemented(proto->shutdown),
3619 proto_method_implemented(proto->setsockopt),
3620 proto_method_implemented(proto->getsockopt),
3621 proto_method_implemented(proto->sendmsg),
3622 proto_method_implemented(proto->recvmsg),
3623 proto_method_implemented(proto->sendpage),
3624 proto_method_implemented(proto->bind),
3625 proto_method_implemented(proto->backlog_rcv),
3626 proto_method_implemented(proto->hash),
3627 proto_method_implemented(proto->unhash),
3628 proto_method_implemented(proto->get_port),
3629 proto_method_implemented(proto->enter_memory_pressure));
3630}
3631
3632static int proto_seq_show(struct seq_file *seq, void *v)
3633{
Pavel Emelianov60f04382007-07-09 13:15:14 -07003634 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003635 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3636 "protocol",
3637 "size",
3638 "sockets",
3639 "memory",
3640 "press",
3641 "maxhdr",
3642 "slab",
3643 "module",
3644 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3645 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07003646 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 return 0;
3648}
3649
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003650static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 .start = proto_seq_start,
3652 .next = proto_seq_next,
3653 .stop = proto_seq_stop,
3654 .show = proto_seq_show,
3655};
3656
Eric Dumazet14e943d2008-11-19 15:14:01 -08003657static __net_init int proto_init_net(struct net *net)
3658{
Christoph Hellwigc3506372018-04-10 19:42:55 +02003659 if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3660 sizeof(struct seq_net_private)))
Eric Dumazet14e943d2008-11-19 15:14:01 -08003661 return -ENOMEM;
3662
3663 return 0;
3664}
3665
3666static __net_exit void proto_exit_net(struct net *net)
3667{
Gao fengece31ff2013-02-18 01:34:56 +00003668 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08003669}
3670
3671
3672static __net_initdata struct pernet_operations proto_net_ops = {
3673 .init = proto_init_net,
3674 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675};
3676
3677static int __init proto_init(void)
3678{
Eric Dumazet14e943d2008-11-19 15:14:01 -08003679 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680}
3681
3682subsys_initcall(proto_init);
3683
3684#endif /* PROC_FS */
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07003685
3686#ifdef CONFIG_NET_RX_BUSY_POLL
3687bool sk_busy_loop_end(void *p, unsigned long start_time)
3688{
3689 struct sock *sk = p;
3690
Eric Dumazet3f926af2019-10-23 22:44:51 -07003691 return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07003692 sk_busy_loop_timeout(sk, start_time);
3693}
3694EXPORT_SYMBOL(sk_busy_loop_end);
3695#endif /* CONFIG_NET_RX_BUSY_POLL */
Christoph Hellwigc0425a42020-05-29 14:09:42 +02003696
3697int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
3698{
3699 if (!sk->sk_prot->bind_add)
3700 return -EOPNOTSUPP;
3701 return sk->sk_prot->bind_add(sk, addr, addr_len);
3702}
3703EXPORT_SYMBOL(sock_bind_add);