/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in the
 * user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when the
 * socket was created and that the current process has it over the network
 * namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
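
/*
 * Illustrative sketch (not part of this file): a protocol that wants to
 * gate a privileged operation on the socket opener's capabilities would
 * typically use one of the helpers above, e.g.:
 *
 *	static int hypothetical_priv_ioctl(struct sock *sk)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return 0;
 *	}
 *
 * The function name above is made up for illustration; real callers live
 * in the individual protocol implementations.
 */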


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
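
/* Rough arithmetic for the defaults above (illustrative only; the exact
 * figures are platform and config dependent): SKB_TRUESIZE(256) is 256
 * bytes of payload plus the struct sk_buff and skb_shared_info overhead,
 * i.e. on the order of 800 bytes on a 64-bit build, so 256 such packets
 * put the default send/receive limits in the low hundreds of kilobytes.
 */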

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);
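
/*
 * Illustrative sketch (not from this file): a storage transport that must
 * keep making progress under memory pressure (e.g. swap over a network
 * block device) would flag its kernel socket right after creating it:
 *
 *	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
 *	if (!err)
 *		sk_set_memalloc(sock->sk);
 *
 * Error handling is elided, and sock_create_kern()'s exact signature
 * varies across kernel versions.
 */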

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
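
/* Worked example for the conversion above (assuming HZ == 1000, and using
 * integer division): a user timeval of { .tv_sec = 2, .tv_usec = 500000 }
 * becomes 2 * 1000 + (500000 + 999) / 1000 = 2500 jiffies, while { 0, 0 }
 * keeps the sentinel MAX_SCHEDULE_TIMEOUT, i.e. "block forever".
 */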

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP) {
				if (sk->sk_state != TCP_ESTABLISHED) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
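
/*
 * Illustrative userspace sketch (not part of this file) of the SO_RCVBUF
 * doubling documented above: the value passed in is doubled to cover
 * sk_buff overhead, and the doubled value is what getsockopt() reports.
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out is now 131072, assuming 65536 <= the rmem_max sysctl
 *
 * "fd" is any open socket descriptor; the numbers are only an example.
 */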


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
Eric Dumazet68835ab2010-11-30 19:04:07 +00001287 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001288 */
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001289static void sock_copy(struct sock *nsk, const struct sock *osk)
1290{
1291#ifdef CONFIG_SECURITY_NETWORK
1292 void *sptr = nsk->sk_security;
1293#endif
Eric Dumazet68835ab2010-11-30 19:04:07 +00001294 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1295
1296 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1297 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1298
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001299#ifdef CONFIG_SECURITY_NETWORK
1300 nsk->sk_security = sptr;
1301 security_sk_clone(osk, nsk);
1302#endif
1303}
1304
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001305void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1306{
1307 unsigned long nulls1, nulls2;
1308
1309 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1310 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1311 if (nulls1 > nulls2)
1312 swap(nulls1, nulls2);
1313
1314 if (nulls1 != 0)
1315 memset((char *)sk, 0, nulls1);
1316 memset((char *)sk + nulls1 + sizeof(void *), 0,
1317 nulls2 - nulls1 - sizeof(void *));
1318 memset((char *)sk + nulls2 + sizeof(void *), 0,
1319 size - nulls2 - sizeof(void *));
1320}
1321EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
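
/*
 * Illustrative sketch (not part of this file): a protocol whose sockets sit
 * on both an skc_node and an skc_portaddr_node list opts in to the selective
 * clearing above through ->clear_sk; UDP, for example, does this in
 * net/ipv4/udp.c.  The "example_" names below are hypothetical.
 */
static struct proto example_portaddr_prot __maybe_unused = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* real protocols use their own sock size */
	.clear_sk	= sk_prot_clear_portaddr_nulls,
	/* ... remaining proto callbacks omitted ... */
};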
1322
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001323static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1324 int family)
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001325{
1326 struct sock *sk;
1327 struct kmem_cache *slab;
1328
1329 slab = prot->slab;
Eric Dumazete912b112009-07-08 19:36:05 +00001330 if (slab != NULL) {
1331 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1332 if (!sk)
1333 return sk;
1334 if (priority & __GFP_ZERO) {
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001335 if (prot->clear_sk)
1336 prot->clear_sk(sk, prot->obj_size);
1337 else
1338 sk_prot_clear_nulls(sk, prot->obj_size);
Eric Dumazete912b112009-07-08 19:36:05 +00001339 }
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001340 } else
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001341 sk = kmalloc(prot->obj_size, priority);
1342
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001343 if (sk != NULL) {
Vegard Nossuma98b65a2009-02-26 14:46:57 +01001344 kmemcheck_annotate_bitfield(sk, flags);
1345
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001346 if (security_sk_alloc(sk, family, priority))
1347 goto out_free;
1348
1349 if (!try_module_get(prot->owner))
1350 goto out_free_sec;
Krishna Kumare022f0b2009-10-19 23:46:20 +00001351 sk_tx_queue_clear(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001352 }
1353
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001354 return sk;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001355
1356out_free_sec:
1357 security_sk_free(sk);
1358out_free:
1359 if (slab != NULL)
1360 kmem_cache_free(slab, sk);
1361 else
1362 kfree(sk);
1363 return NULL;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001364}
1365
1366static void sk_prot_free(struct proto *prot, struct sock *sk)
1367{
1368 struct kmem_cache *slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001369 struct module *owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001370
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001371 owner = prot->owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001372 slab = prot->slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001373
1374 security_sk_free(sk);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001375 if (slab != NULL)
1376 kmem_cache_free(slab, sk);
1377 else
1378 kfree(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001379 module_put(owner);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001380}
1381
Daniel Borkmann86f85152013-12-29 17:27:11 +01001382#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Zefan Li6ffd4642013-04-08 20:03:47 +00001383void sock_update_netprioidx(struct sock *sk)
Neil Horman5bc14212011-11-22 05:10:51 +00001384{
Neil Horman5bc14212011-11-22 05:10:51 +00001385 if (in_interrupt())
1386 return;
Neil Horman2b73bc62012-02-10 05:43:38 +00001387
Zefan Li6ffd4642013-04-08 20:03:47 +00001388 sk->sk_cgrp_prioidx = task_netprioidx(current);
Neil Horman5bc14212011-11-22 05:10:51 +00001389}
1390EXPORT_SYMBOL_GPL(sock_update_netprioidx);
Herbert Xuf8451722010-05-24 00:12:34 -07001391#endif
1392
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393/**
1394 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001395 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001396 * @family: protocol family
1397 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1398 * @prot: struct proto associated with this new sock instance
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001399 * @kern: is this to be a kernel socket?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001401struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001402 struct proto *prot, int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001404 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001406 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001408 sk->sk_family = family;
1409 /*
1410 * See comment in struct sock definition to understand
1411 * why we need sk_prot_creator -acme
1412 */
1413 sk->sk_prot = sk->sk_prot_creator = prot;
1414 sock_lock_init(sk);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001415 sk->sk_net_refcnt = kern ? 0 : 1;
1416 if (likely(sk->sk_net_refcnt))
1417 get_net(net);
1418 sock_net_set(sk, net);
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001419 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001420
Zefan Li211d2f972013-04-08 20:03:35 +00001421 sock_update_classid(sk);
Zefan Li6ffd4642013-04-08 20:03:47 +00001422 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 }
Frank Filza79af592005-09-27 15:23:38 -07001424
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001425 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426}
Eric Dumazet2a915252009-05-27 11:30:05 +00001427EXPORT_SYMBOL(sk_alloc);
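
/*
 * Illustrative sketch (not part of this file): the usual pairing of
 * sk_alloc() with sock_init_data() in a protocol family's ->create()
 * handler.  PF_EXAMPLE and example_proto are hypothetical placeholders
 * for the caller's own family constant and struct proto.
 */
static int example_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock_init_data(sock, sk);	/* defined later in this file */
	return 0;
}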
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428
Eric Dumazet2b85a342009-06-11 02:55:43 -07001429static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430{
1431 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
1433 if (sk->sk_destruct)
1434 sk->sk_destruct(sk);
1435
Paul E. McKenneya898def2010-02-22 17:04:49 -08001436 filter = rcu_dereference_check(sk->sk_filter,
1437 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001439 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001440 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 }
1442
Eric Dumazet08e29af2011-11-28 12:04:18 +00001443 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
1445 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001446 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1447 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001449 if (sk->sk_peer_cred)
1450 put_cred(sk->sk_peer_cred);
1451 put_pid(sk->sk_peer_pid);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001452 if (likely(sk->sk_net_refcnt))
1453 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001454 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001456
1457void sk_free(struct sock *sk)
1458{
1459 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001460 * We subtract one from sk_wmem_alloc so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001461 * some packets are still in some tx queue.
 1462 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
1463 */
1464 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1465 __sk_free(sk);
1466}
Eric Dumazet2a915252009-05-27 11:30:05 +00001467EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
Denis V. Lunevedf02082008-02-29 11:18:32 -08001469/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001470 * Last sock_put should drop reference to sk->sk_net. It has already
 1471 * been dropped in sk_change_net. Taking a reference to the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001472 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001473 * Take a reference to the socket to remove it from the hash while still _alive_,
Denis V. Lunevedf02082008-02-29 11:18:32 -08001474 * and after that destroy it in the context of init_net.
1475 */
1476void sk_release_kernel(struct sock *sk)
1477{
1478 if (sk == NULL || sk->sk_socket == NULL)
1479 return;
1480
1481 sock_hold(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001482 sock_net_set(sk, get_net(&init_net));
Ying Xuec243d7e2015-03-16 18:19:12 +08001483 sock_release(sk->sk_socket);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001484 sock_put(sk);
1485}
David S. Miller45af1752008-02-29 11:33:19 -08001486EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001487
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001488static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1489{
1490 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1491 sock_update_memcg(newsk);
1492}
1493
Eric Dumazete56c57d2011-11-08 17:07:07 -05001494/**
1495 * sk_clone_lock - clone a socket, and lock its clone
1496 * @sk: the socket to clone
1497 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1498 *
 1499 * Caller must unlock the socket even in the error path (bh_unlock_sock(newsk))
1500 */
1501struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001502{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001503 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001504 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001505
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001506 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001507 if (newsk != NULL) {
1508 struct sk_filter *filter;
1509
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001510 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001511
1512 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001513 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001514 sk_node_init(&newsk->sk_node);
1515 sock_lock_init(newsk);
1516 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001517 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001518 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001519
1520 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001521 /*
1522 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1523 */
1524 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001525 atomic_set(&newsk->sk_omem_alloc, 0);
1526 skb_queue_head_init(&newsk->sk_receive_queue);
1527 skb_queue_head_init(&newsk->sk_write_queue);
1528
Eric Dumazetb6c67122010-04-08 23:03:29 +00001529 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001530 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef0e2007-07-19 01:49:00 -07001531 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1532 af_callback_keys + newsk->sk_family,
1533 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001534
1535 newsk->sk_dst_cache = NULL;
1536 newsk->sk_wmem_queued = 0;
1537 newsk->sk_forward_alloc = 0;
1538 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001539 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1540
1541 sock_reset_flag(newsk, SOCK_DONE);
1542 skb_queue_head_init(&newsk->sk_error_queue);
1543
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001544 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001545 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001546 /* though it's an empty new sock, the charging may fail
1547 * if sysctl_optmem_max was changed between creation of
1548 * original socket and cloning
1549 */
1550 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001551
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001552 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001553 /* It is still a raw copy of the parent, so invalidate
 1554 * the destructor and do a plain sk_free() */
1555 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001556 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001557 sk_free(newsk);
1558 newsk = NULL;
1559 goto out;
1560 }
1561
1562 newsk->sk_err = 0;
1563 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001564 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet33cf7c92015-03-11 18:53:14 -07001565 atomic64_set(&newsk->sk_cookie, 0);
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001566 /*
1567 * Before updating sk_refcnt, we must commit prior changes to memory
1568 * (Documentation/RCU/rculist_nulls.txt for details)
1569 */
1570 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001571 atomic_set(&newsk->sk_refcnt, 2);
1572
1573 /*
1574 * Increment the counter in the same struct proto as the master
1575 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1576 * is the same as sk->sk_prot->socks, as this field was copied
1577 * with memcpy).
1578 *
1579 * This _changes_ the previous behaviour, where
 1580 * tcp_create_openreq_child was always incrementing the
 1581 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
 1582 * to be taken into account in all callers. -acme
1583 */
1584 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001585 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001586 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001587
Glauber Costaf3f511e2012-01-05 20:16:39 +00001588 sk_update_clone(sk, newsk);
1589
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001590 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001591 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001592
Eric Dumazet08e29af2011-11-28 12:04:18 +00001593 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001594 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001595 }
1596out:
1597 return newsk;
1598}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001599EXPORT_SYMBOL_GPL(sk_clone_lock);
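
/*
 * Illustrative sketch (not part of this file): sk_clone_lock() returns the
 * clone locked, so the caller must bh_unlock_sock() it on every path, as the
 * comment above requires.
 */
static struct sock *example_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!newsk)
		return NULL;

	/* ... initialise protocol-private fields of newsk here ... */

	bh_unlock_sock(newsk);
	return newsk;
}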
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001600
Andi Kleen99580892007-04-20 17:12:43 -07001601void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1602{
1603 __sk_dst_set(sk, dst);
1604 sk->sk_route_caps = dst->dev->features;
1605 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001606 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001607 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001608 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001609 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001610 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001611 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001612 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001613 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001614 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001615 }
Andi Kleen99580892007-04-20 17:12:43 -07001616 }
1617}
1618EXPORT_SYMBOL_GPL(sk_setup_caps);
1619
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620/*
1621 * Simple resource managers for sockets.
1622 */
1623
1624
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001625/*
1626 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 */
1628void sock_wfree(struct sk_buff *skb)
1629{
1630 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001631 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632
Eric Dumazetd99927f2009-09-24 10:49:24 +00001633 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1634 /*
1635 * Keep a reference on sk_wmem_alloc, this will be released
1636 * after sk_write_space() call
1637 */
1638 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001640 len = 1;
1641 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001642 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001643 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1644 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001645 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001646 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001647 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648}
Eric Dumazet2a915252009-05-27 11:30:05 +00001649EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001651void skb_orphan_partial(struct sk_buff *skb)
1652{
1653 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
 1654 * so we do not completely orphan the skb, but transfer all
1655 * accounted bytes but one, to avoid unexpected reorders.
1656 */
1657 if (skb->destructor == sock_wfree
1658#ifdef CONFIG_INET
1659 || skb->destructor == tcp_wfree
1660#endif
1661 ) {
1662 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1663 skb->truesize = 1;
1664 } else {
1665 skb_orphan(skb);
1666 }
1667}
1668EXPORT_SYMBOL(skb_orphan_partial);
1669
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001670/*
1671 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 */
1673void sock_rfree(struct sk_buff *skb)
1674{
1675 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001676 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Eric Dumazetd361fd52010-07-10 22:45:17 +00001678 atomic_sub(len, &sk->sk_rmem_alloc);
1679 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680}
Eric Dumazet2a915252009-05-27 11:30:05 +00001681EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
Oliver Hartkopp7768eed2015-03-10 19:03:46 +01001683/*
1684 * Buffer destructor for skbs that are not used directly in read or write
1685 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1686 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04001687void sock_efree(struct sk_buff *skb)
1688{
1689 sock_put(skb->sk);
1690}
1691EXPORT_SYMBOL(sock_efree);
1692
Eric W. Biederman976d02012012-05-23 17:16:53 -06001693kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001695 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
Eric Dumazetf064af12010-09-22 12:43:39 +00001697 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001698 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001699 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 return uid;
1701}
Eric Dumazet2a915252009-05-27 11:30:05 +00001702EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
1704unsigned long sock_i_ino(struct sock *sk)
1705{
1706 unsigned long ino;
1707
Eric Dumazetf064af12010-09-22 12:43:39 +00001708 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001710 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 return ino;
1712}
Eric Dumazet2a915252009-05-27 11:30:05 +00001713EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714
1715/*
 1716 * Allocate an skb from the socket's send buffer.
1717 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001718struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001719 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720{
1721 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001722 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 if (skb) {
1724 skb_set_owner_w(skb, sk);
1725 return skb;
1726 }
1727 }
1728 return NULL;
1729}
Eric Dumazet2a915252009-05-27 11:30:05 +00001730EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001734 */
Al Virodd0fc662005-10-07 07:46:04 +01001735void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736{
Eric Dumazet95c96172012-04-15 05:58:06 +00001737 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1739 void *mem;
1740 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001741 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 */
1743 atomic_add(size, &sk->sk_omem_alloc);
1744 mem = kmalloc(size, priority);
1745 if (mem)
1746 return mem;
1747 atomic_sub(size, &sk->sk_omem_alloc);
1748 }
1749 return NULL;
1750}
Eric Dumazet2a915252009-05-27 11:30:05 +00001751EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Daniel Borkmann79e88652014-11-19 17:13:11 +01001753/* Free an option memory block. Note, we actually want the inline
1754 * here as this allows gcc to detect the nullify and fold away the
1755 * condition entirely.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 */
Daniel Borkmann79e88652014-11-19 17:13:11 +01001757static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1758 const bool nullify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759{
David S. Millere53da5f2014-10-14 17:02:37 -04001760 if (WARN_ON_ONCE(!mem))
1761 return;
Daniel Borkmann79e88652014-11-19 17:13:11 +01001762 if (nullify)
1763 kzfree(mem);
1764 else
1765 kfree(mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 atomic_sub(size, &sk->sk_omem_alloc);
1767}
Daniel Borkmann79e88652014-11-19 17:13:11 +01001768
1769void sock_kfree_s(struct sock *sk, void *mem, int size)
1770{
1771 __sock_kfree_s(sk, mem, size, false);
1772}
Eric Dumazet2a915252009-05-27 11:30:05 +00001773EXPORT_SYMBOL(sock_kfree_s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
Daniel Borkmann79e88652014-11-19 17:13:11 +01001775void sock_kzfree_s(struct sock *sk, void *mem, int size)
1776{
1777 __sock_kfree_s(sk, mem, size, true);
1778}
1779EXPORT_SYMBOL(sock_kzfree_s);
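
/*
 * Illustrative sketch (not part of this file): option memory is charged to
 * sk_omem_alloc, so every sock_kmalloc() must be balanced by a
 * sock_kfree_s() (or sock_kzfree_s() for sensitive data) of the same size.
 */
static int example_copy_option(struct sock *sk, const void __user *optval,
			       int optlen)
{
	void *buf;

	buf = sock_kmalloc(sk, optlen, GFP_KERNEL);
	if (!buf)
		return -ENOBUFS;

	if (copy_from_user(buf, optval, optlen)) {
		sock_kfree_s(sk, buf, optlen);
		return -EFAULT;
	}

	/* ... consume buf ...; prefer sock_kzfree_s() if it held key material */
	sock_kfree_s(sk, buf, optlen);
	return 0;
}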
1780
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1782 I think these locks should be removed for datagram sockets.
1783 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001784static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785{
1786 DEFINE_WAIT(wait);
1787
1788 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1789 for (;;) {
1790 if (!timeo)
1791 break;
1792 if (signal_pending(current))
1793 break;
1794 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001795 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1797 break;
1798 if (sk->sk_shutdown & SEND_SHUTDOWN)
1799 break;
1800 if (sk->sk_err)
1801 break;
1802 timeo = schedule_timeout(timeo);
1803 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001804 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 return timeo;
1806}
1807
1808
1809/*
1810 * Generic send/receive buffer handlers
1811 */
1812
Herbert Xu4cc7f682009-02-04 16:55:54 -08001813struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1814 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001815 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001817 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 long timeo;
1819 int err;
1820
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001822 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 err = sock_error(sk);
1824 if (err != 0)
1825 goto failure;
1826
1827 err = -EPIPE;
1828 if (sk->sk_shutdown & SEND_SHUTDOWN)
1829 goto failure;
1830
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001831 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1832 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001833
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001834 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1835 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1836 err = -EAGAIN;
1837 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001839 if (signal_pending(current))
1840 goto interrupted;
1841 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001843 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1844 errcode, sk->sk_allocation);
1845 if (skb)
1846 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 return skb;
1848
1849interrupted:
1850 err = sock_intr_errno(timeo);
1851failure:
1852 *errcode = err;
1853 return NULL;
1854}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001855EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001857struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 int noblock, int *errcode)
1859{
Eric Dumazet28d64272013-08-08 14:38:47 -07001860 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861}
Eric Dumazet2a915252009-05-27 11:30:05 +00001862EXPORT_SYMBOL(sock_alloc_send_skb);
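
/*
 * Illustrative sketch (not part of this file): a typical datagram sendmsg()
 * path allocating its skb against the socket's send buffer.  Header
 * reservation and queueing are elided.
 */
static int example_send(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
				  &err);
	if (!skb)
		return err;

	err = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/* ... hand skb to the transmit path ... */
	return len;
}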
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Eric Dumazet5640f762012-09-23 23:04:42 +00001864/* On 32bit arches, an skb frag is limited to 2^15 */
1865#define SKB_FRAG_PAGE_ORDER get_order(32768)
1866
Eric Dumazet400dfd32013-10-17 16:27:07 -07001867/**
1868 * skb_page_frag_refill - check that a page_frag contains enough room
1869 * @sz: minimum size of the fragment we want to get
1870 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001871 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001872 *
1873 * Note: While this allocator tries to use high order pages, there is
1874 * no guarantee that allocations succeed. Therefore, @sz MUST be
 1875 * less than or equal to PAGE_SIZE.
1876 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001877bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001878{
Eric Dumazet5640f762012-09-23 23:04:42 +00001879 if (pfrag->page) {
1880 if (atomic_read(&pfrag->page->_count) == 1) {
1881 pfrag->offset = 0;
1882 return true;
1883 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001884 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001885 return true;
1886 put_page(pfrag->page);
1887 }
1888
Eric Dumazetd9b29382014-08-27 20:49:34 -07001889 pfrag->offset = 0;
1890 if (SKB_FRAG_PAGE_ORDER) {
1891 pfrag->page = alloc_pages(gfp | __GFP_COMP |
1892 __GFP_NOWARN | __GFP_NORETRY,
1893 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00001894 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07001895 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00001896 return true;
1897 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07001898 }
1899 pfrag->page = alloc_page(gfp);
1900 if (likely(pfrag->page)) {
1901 pfrag->size = PAGE_SIZE;
1902 return true;
1903 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001904 return false;
1905}
1906EXPORT_SYMBOL(skb_page_frag_refill);
1907
1908bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1909{
1910 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1911 return true;
1912
Eric Dumazet5640f762012-09-23 23:04:42 +00001913 sk_enter_memory_pressure(sk);
1914 sk_stream_moderate_sndbuf(sk);
1915 return false;
1916}
1917EXPORT_SYMBOL(sk_page_frag_refill);
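
/*
 * Illustrative sketch (not part of this file): the common sendmsg() pattern
 * around sk_page_frag_refill().  sk_page_frag() (include/net/sock.h) picks
 * the per-socket or per-task page_frag being refilled here.
 */
static int example_append(struct sock *sk, struct msghdr *msg, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -EAGAIN;		/* real callers wait for memory */

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_page_from_iter(pfrag->page, pfrag->offset, copy,
				&msg->msg_iter) != copy)
		return -EFAULT;

	/* ... attach (pfrag->page, pfrag->offset, copy) as an skb frag ... */
	pfrag->offset += copy;
	return copy;
}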
1918
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001920 __releases(&sk->sk_lock.slock)
1921 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922{
1923 DEFINE_WAIT(wait);
1924
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001925 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1927 TASK_UNINTERRUPTIBLE);
1928 spin_unlock_bh(&sk->sk_lock.slock);
1929 schedule();
1930 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001931 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 break;
1933 }
1934 finish_wait(&sk->sk_lock.wq, &wait);
1935}
1936
1937static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001938 __releases(&sk->sk_lock.slock)
1939 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940{
1941 struct sk_buff *skb = sk->sk_backlog.head;
1942
1943 do {
1944 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1945 bh_unlock_sock(sk);
1946
1947 do {
1948 struct sk_buff *next = skb->next;
1949
Eric Dumazete4cbb022012-04-30 16:07:09 +00001950 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001951 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001953 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
1955 /*
1956 * We are in process context here with softirqs
1957 * disabled, use cond_resched_softirq() to preempt.
1958 * This is safe to do because we've taken the backlog
1959 * queue private:
1960 */
1961 cond_resched_softirq();
1962
1963 skb = next;
1964 } while (skb != NULL);
1965
1966 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001967 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001968
1969 /*
 1970 * Doing the zeroing here guarantees we cannot loop forever
1971 * while a wild producer attempts to flood us.
1972 */
1973 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974}
1975
1976/**
1977 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001978 * @sk: sock to wait on
1979 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 *
1981 * Now socket state including sk->sk_err is changed only under lock,
1982 * hence we may omit checks after joining wait queue.
1983 * We check receive queue before schedule() only as optimization;
1984 * it is very likely that release_sock() added new data.
1985 */
1986int sk_wait_data(struct sock *sk, long *timeo)
1987{
1988 int rc;
1989 DEFINE_WAIT(wait);
1990
Eric Dumazetaa395142010-04-20 13:03:51 +00001991 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1993 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1994 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001995 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 return rc;
1997}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998EXPORT_SYMBOL(sk_wait_data);
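
/*
 * Illustrative sketch (not part of this file): a recvmsg() loop blocking in
 * sk_wait_data() while the receive queue is empty.  The socket is assumed to
 * be held via lock_sock() by the caller.
 */
static int example_wait_for_data(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sk->sk_err)
			return sock_error(sk);
		if (!timeo)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		sk_wait_data(sk, &timeo);
	}
	return 0;
}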
1999
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002000/**
2001 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2002 * @sk: socket
2003 * @size: memory size to allocate
2004 * @kind: allocation type
2005 *
2006 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2007 * rmem allocation. This function assumes that protocols which have
2008 * memory_pressure use sk_wmem_queued as write buffer accounting.
2009 */
2010int __sk_mem_schedule(struct sock *sk, int size, int kind)
2011{
2012 struct proto *prot = sk->sk_prot;
2013 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00002014 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00002015 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002016
2017 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002018
Glauber Costae1aab162011-12-11 21:47:03 +00002019 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002020
2021 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00002022 if (parent_status == UNDER_LIMIT &&
2023 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00002024 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002025 return 1;
2026 }
2027
Glauber Costae1aab162011-12-11 21:47:03 +00002028 /* Under pressure. (we or our parents) */
2029 if ((parent_status > SOFT_LIMIT) ||
2030 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00002031 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002032
Glauber Costae1aab162011-12-11 21:47:03 +00002033 /* Over hard limit (we or our parents) */
2034 if ((parent_status == OVER_LIMIT) ||
2035 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002036 goto suppress_allocation;
2037
2038 /* guarantee minimum buffer size under pressure */
2039 if (kind == SK_MEM_RECV) {
2040 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2041 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002042
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002043 } else { /* SK_MEM_SEND */
2044 if (sk->sk_type == SOCK_STREAM) {
2045 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2046 return 1;
2047 } else if (atomic_read(&sk->sk_wmem_alloc) <
2048 prot->sysctl_wmem[0])
2049 return 1;
2050 }
2051
Glauber Costa180d8cd2011-12-11 21:47:02 +00002052 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08002053 int alloc;
2054
Glauber Costa180d8cd2011-12-11 21:47:02 +00002055 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002056 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002057 alloc = sk_sockets_allocated_read_positive(sk);
2058 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002059 sk_mem_pages(sk->sk_wmem_queued +
2060 atomic_read(&sk->sk_rmem_alloc) +
2061 sk->sk_forward_alloc))
2062 return 1;
2063 }
2064
2065suppress_allocation:
2066
2067 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2068 sk_stream_moderate_sndbuf(sk);
2069
2070 /* Fail only if socket is _under_ its sndbuf.
 2071 * In this case we cannot block, so we have to fail.
2072 */
2073 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2074 return 1;
2075 }
2076
Satoru Moriya3847ce32011-06-17 12:00:03 +00002077 trace_sock_exceed_buf_limit(sk, prot, allocated);
2078
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002079 /* Alas. Undo changes. */
2080 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002081
Glauber Costa0e90b312012-01-20 04:57:16 +00002082 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002083
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002084 return 0;
2085}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002086EXPORT_SYMBOL(__sk_mem_schedule);
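
/*
 * Illustrative sketch (not part of this file): a simplified version of what
 * the sk_rmem_schedule()/sk_mem_charge() helpers in include/net/sock.h do
 * when charging an incoming skb, built directly on __sk_mem_schedule().
 */
static int example_charge_rmem(struct sock *sk, struct sk_buff *skb)
{
	int size = skb->truesize;

	if (size > sk->sk_forward_alloc &&
	    !__sk_mem_schedule(sk, size, SK_MEM_RECV))
		return -ENOBUFS;

	sk->sk_forward_alloc -= size;
	atomic_add(size, &sk->sk_rmem_alloc);
	return 0;
}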
2087
2088/**
 2089 * __sk_mem_reclaim - reclaim memory_allocated
2090 * @sk: socket
2091 */
2092void __sk_mem_reclaim(struct sock *sk)
2093{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002094 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00002095 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002096 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2097
Glauber Costa180d8cd2011-12-11 21:47:02 +00002098 if (sk_under_memory_pressure(sk) &&
2099 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2100 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002101}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002102EXPORT_SYMBOL(__sk_mem_reclaim);
2103
2104
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105/*
2106 * Set of default routines for initialising struct proto_ops when
2107 * the protocol does not support a particular function. In certain
2108 * cases where it makes no sense for a protocol to have a "do nothing"
2109 * function, some default processing is provided.
2110 */
2111
2112int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2113{
2114 return -EOPNOTSUPP;
2115}
Eric Dumazet2a915252009-05-27 11:30:05 +00002116EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002118int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 int len, int flags)
2120{
2121 return -EOPNOTSUPP;
2122}
Eric Dumazet2a915252009-05-27 11:30:05 +00002123EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
2125int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2126{
2127 return -EOPNOTSUPP;
2128}
Eric Dumazet2a915252009-05-27 11:30:05 +00002129EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2132{
2133 return -EOPNOTSUPP;
2134}
Eric Dumazet2a915252009-05-27 11:30:05 +00002135EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002137int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 int *len, int peer)
2139{
2140 return -EOPNOTSUPP;
2141}
Eric Dumazet2a915252009-05-27 11:30:05 +00002142EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
Eric Dumazet2a915252009-05-27 11:30:05 +00002144unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145{
2146 return 0;
2147}
Eric Dumazet2a915252009-05-27 11:30:05 +00002148EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2151{
2152 return -EOPNOTSUPP;
2153}
Eric Dumazet2a915252009-05-27 11:30:05 +00002154EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156int sock_no_listen(struct socket *sock, int backlog)
2157{
2158 return -EOPNOTSUPP;
2159}
Eric Dumazet2a915252009-05-27 11:30:05 +00002160EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
2162int sock_no_shutdown(struct socket *sock, int how)
2163{
2164 return -EOPNOTSUPP;
2165}
Eric Dumazet2a915252009-05-27 11:30:05 +00002166EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
2168int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002169 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170{
2171 return -EOPNOTSUPP;
2172}
Eric Dumazet2a915252009-05-27 11:30:05 +00002173EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
2175int sock_no_getsockopt(struct socket *sock, int level, int optname,
2176 char __user *optval, int __user *optlen)
2177{
2178 return -EOPNOTSUPP;
2179}
Eric Dumazet2a915252009-05-27 11:30:05 +00002180EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
Ying Xue1b784142015-03-02 15:37:48 +08002182int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183{
2184 return -EOPNOTSUPP;
2185}
Eric Dumazet2a915252009-05-27 11:30:05 +00002186EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Ying Xue1b784142015-03-02 15:37:48 +08002188int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2189 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190{
2191 return -EOPNOTSUPP;
2192}
Eric Dumazet2a915252009-05-27 11:30:05 +00002193EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
2195int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2196{
2197 /* Mirror missing mmap method error code */
2198 return -ENODEV;
2199}
Eric Dumazet2a915252009-05-27 11:30:05 +00002200EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
2202ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2203{
2204 ssize_t res;
2205 struct msghdr msg = {.msg_flags = flags};
2206 struct kvec iov;
2207 char *kaddr = kmap(page);
2208 iov.iov_base = kaddr + offset;
2209 iov.iov_len = size;
2210 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2211 kunmap(page);
2212 return res;
2213}
Eric Dumazet2a915252009-05-27 11:30:05 +00002214EXPORT_SYMBOL(sock_no_sendpage);
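
/*
 * Illustrative sketch (not part of this file): a datagram-only protocol
 * wiring the sock_no_*() stubs into its proto_ops for the operations it
 * does not support, as many in-tree families do.  The example_ name and
 * PF_UNSPEC placeholder are hypothetical.
 */
static const struct proto_ops example_dgram_ops __maybe_unused = {
	.family		= PF_UNSPEC,		/* placeholder */
	.owner		= THIS_MODULE,
	/* ... protocol-specific release/bind/sendmsg/recvmsg/getname ... */
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};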
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
2216/*
2217 * Default Socket Callbacks
2218 */
2219
2220static void sock_def_wakeup(struct sock *sk)
2221{
Eric Dumazet43815482010-04-29 11:01:49 +00002222 struct socket_wq *wq;
2223
2224 rcu_read_lock();
2225 wq = rcu_dereference(sk->sk_wq);
2226 if (wq_has_sleeper(wq))
2227 wake_up_interruptible_all(&wq->wait);
2228 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229}
2230
2231static void sock_def_error_report(struct sock *sk)
2232{
Eric Dumazet43815482010-04-29 11:01:49 +00002233 struct socket_wq *wq;
2234
2235 rcu_read_lock();
2236 wq = rcu_dereference(sk->sk_wq);
2237 if (wq_has_sleeper(wq))
2238 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002239 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002240 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241}
2242
David S. Miller676d2362014-04-11 16:15:36 -04002243static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244{
Eric Dumazet43815482010-04-29 11:01:49 +00002245 struct socket_wq *wq;
2246
2247 rcu_read_lock();
2248 wq = rcu_dereference(sk->sk_wq);
2249 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002250 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002251 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002252 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002253 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254}
2255
2256static void sock_def_write_space(struct sock *sk)
2257{
Eric Dumazet43815482010-04-29 11:01:49 +00002258 struct socket_wq *wq;
2259
2260 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
2262 /* Do not wake up a writer until he can make "significant"
2263 * progress. --DaveM
2264 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002265 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002266 wq = rcu_dereference(sk->sk_wq);
2267 if (wq_has_sleeper(wq))
2268 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002269 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
2271 /* Should agree with poll, otherwise some programs break */
2272 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002273 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 }
2275
Eric Dumazet43815482010-04-29 11:01:49 +00002276 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277}
2278
2279static void sock_def_destruct(struct sock *sk)
2280{
Jesper Juhla51482b2005-11-08 09:41:34 -08002281 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282}
2283
2284void sk_send_sigurg(struct sock *sk)
2285{
2286 if (sk->sk_socket && sk->sk_socket->file)
2287 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002288 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289}
Eric Dumazet2a915252009-05-27 11:30:05 +00002290EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291
2292void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2293 unsigned long expires)
2294{
2295 if (!mod_timer(timer, expires))
2296 sock_hold(sk);
2297}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298EXPORT_SYMBOL(sk_reset_timer);
2299
2300void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2301{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002302 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 __sock_put(sk);
2304}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305EXPORT_SYMBOL(sk_stop_timer);
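
/*
 * Illustrative sketch (not part of this file): arming and disarming
 * sk->sk_timer.  sk_reset_timer() takes a socket reference on behalf of the
 * timer and sk_stop_timer() drops it again.
 */
static void example_arm_timer(struct sock *sk, unsigned long delay)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
}

static void example_disarm_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}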
2306
2307void sock_init_data(struct socket *sock, struct sock *sk)
2308{
2309 skb_queue_head_init(&sk->sk_receive_queue);
2310 skb_queue_head_init(&sk->sk_write_queue);
2311 skb_queue_head_init(&sk->sk_error_queue);
2312
2313 sk->sk_send_head = NULL;
2314
2315 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002316
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 sk->sk_allocation = GFP_KERNEL;
2318 sk->sk_rcvbuf = sysctl_rmem_default;
2319 sk->sk_sndbuf = sysctl_wmem_default;
2320 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002321 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322
2323 sock_set_flag(sk, SOCK_ZAPPED);
2324
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002325 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002327 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 sock->sk = sk;
2329 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002330 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
Eric Dumazetb6c67122010-04-08 23:03:29 +00002332 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef0e2007-07-19 01:49:00 -07002334 lockdep_set_class_and_name(&sk->sk_callback_lock,
2335 af_callback_keys + sk->sk_family,
2336 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337
2338 sk->sk_state_change = sock_def_wakeup;
2339 sk->sk_data_ready = sock_def_readable;
2340 sk->sk_write_space = sock_def_write_space;
2341 sk->sk_error_report = sock_def_error_report;
2342 sk->sk_destruct = sock_def_destruct;
2343
Eric Dumazet5640f762012-09-23 23:04:42 +00002344 sk->sk_frag.page = NULL;
2345 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002346 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002348 sk->sk_peer_pid = NULL;
2349 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 sk->sk_write_pending = 0;
2351 sk->sk_rcvlowat = 1;
2352 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2353 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2354
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002355 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356
Cong Wange0d10952013-08-01 11:10:25 +08002357#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002358 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002359 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002360#endif
2361
Eric Dumazet62748f32013-09-24 08:20:52 -07002362 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002363 sk->sk_pacing_rate = ~0U;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002364 /*
2365 * Before updating sk_refcnt, we must commit prior changes to memory
2366 * (Documentation/RCU/rculist_nulls.txt for details)
2367 */
2368 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002370 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371}
Eric Dumazet2a915252009-05-27 11:30:05 +00002372EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002374void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375{
2376 might_sleep();
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002377 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002378 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002380 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002381 spin_unlock(&sk->sk_lock.slock);
2382 /*
2383 * The sk_lock has mutex_lock() semantics here:
2384 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002385 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002386 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002388EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002390void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391{
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002392 /*
2393 * The sk_lock has mutex_unlock() semantics:
2394 */
2395 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2396
2397 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 if (sk->sk_backlog.tail)
2399 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002400
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002401 /* Warning : release_cb() might need to release sk ownership,
2402 * ie call sock_release_ownership(sk) before us.
2403 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002404 if (sk->sk_prot->release_cb)
2405 sk->sk_prot->release_cb(sk);
2406
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002407 sock_release_ownership(sk);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002408 if (waitqueue_active(&sk->sk_lock.wq))
2409 wake_up(&sk->sk_lock.wq);
2410 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411}
2412EXPORT_SYMBOL(release_sock);
2413
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002414/**
2415 * lock_sock_fast - fast version of lock_sock
2416 * @sk: socket
2417 *
 2418 * This version should be used for very small sections, where the process won't block.
 2419 * Returns false if the fast path is taken:
 2420 * sk_lock.slock locked, owned = 0, BH disabled
 2421 * Returns true if the slow path is taken:
 2422 * sk_lock.slock unlocked, owned = 1, BH enabled
2423 */
2424bool lock_sock_fast(struct sock *sk)
2425{
2426 might_sleep();
2427 spin_lock_bh(&sk->sk_lock.slock);
2428
2429 if (!sk->sk_lock.owned)
2430 /*
2431 * Note : We must disable BH
2432 */
2433 return false;
2434
2435 __lock_sock(sk);
2436 sk->sk_lock.owned = 1;
2437 spin_unlock(&sk->sk_lock.slock);
2438 /*
2439 * The sk_lock has mutex_lock() semantics here:
2440 */
2441 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2442 local_bh_enable();
2443 return true;
2444}
2445EXPORT_SYMBOL(lock_sock_fast);
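
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * lock_sock_fast() with unlock_sock_fast() (include/net/sock.h) around a
 * very small critical section.
 */
static void example_poke_socket(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... a very small amount of work on sk ... */

	unlock_sock_fast(sk, slow);
}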
2446
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002448{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002449 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002451 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002452 tv = ktime_to_timeval(sk->sk_stamp);
2453 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002455 if (tv.tv_sec == 0) {
2456 sk->sk_stamp = ktime_get_real();
2457 tv = ktime_to_timeval(sk->sk_stamp);
2458 }
2459 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002460}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461EXPORT_SYMBOL(sock_get_timestamp);
2462
Eric Dumazetae40eb12007-03-18 17:33:16 -07002463int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2464{
2465 struct timespec ts;
2466 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002467 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002468 ts = ktime_to_timespec(sk->sk_stamp);
2469 if (ts.tv_sec == -1)
2470 return -ENOENT;
2471 if (ts.tv_sec == 0) {
2472 sk->sk_stamp = ktime_get_real();
2473 ts = ktime_to_timespec(sk->sk_stamp);
2474 }
2475 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2476}
2477EXPORT_SYMBOL(sock_get_timestampns);
2478
Patrick Ohly20d49472009-02-12 05:03:38 +00002479void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002480{
Patrick Ohly20d49472009-02-12 05:03:38 +00002481 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002482 unsigned long previous_flags = sk->sk_flags;
2483
Patrick Ohly20d49472009-02-12 05:03:38 +00002484 sock_set_flag(sk, flag);
2485 /*
2486 * we just set one of the two flags which require net
2487 * time stamping, but time stamping might have been on
2488 * already because of the other one
2489 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002490 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002491 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 }
2493}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
Richard Cochrancb820f82013-07-19 19:40:09 +02002495int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2496 int level, int type)
2497{
2498 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002499 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002500 int copied, err;
2501
2502 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002503 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002504 if (skb == NULL)
2505 goto out;
2506
2507 copied = skb->len;
2508 if (copied > len) {
2509 msg->msg_flags |= MSG_TRUNC;
2510 copied = len;
2511 }
David S. Miller51f3d022014-11-05 16:46:40 -05002512 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002513 if (err)
2514 goto out_free_skb;
2515
2516 sock_recv_timestamp(msg, sk, skb);
2517
2518 serr = SKB_EXT_ERR(skb);
2519 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2520
2521 msg->msg_flags |= MSG_ERRQUEUE;
2522 err = copied;
2523
Richard Cochrancb820f82013-07-19 19:40:09 +02002524out_free_skb:
2525 kfree_skb(skb);
2526out:
2527 return err;
2528}
2529EXPORT_SYMBOL(sock_recv_errqueue);
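
/*
 * Illustrative sketch (not part of this file): a protocol's recvmsg()
 * diverting MSG_ERRQUEUE reads to sock_recv_errqueue().  SOL_EXAMPLE and
 * EXAMPLE_ERR are hypothetical cmsg level/type values.
 */
static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags)
{
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len,
					  SOL_EXAMPLE, EXAMPLE_ERR);

	/* ... normal receive path ... */
	return 0;
}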
2530
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531/*
 2532 * Get a socket option on a socket.
2533 *
2534 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2535 * asynchronous errors should be reported by getsockopt. We assume
 2536 * this means if you specify SO_ERROR (otherwise what's the point of it).
2537 */
2538int sock_common_getsockopt(struct socket *sock, int level, int optname,
2539 char __user *optval, int __user *optlen)
2540{
2541 struct sock *sk = sock->sk;
2542
2543 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2544}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545EXPORT_SYMBOL(sock_common_getsockopt);
2546
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002547#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002548int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2549 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002550{
2551 struct sock *sk = sock->sk;
2552
Johannes Berg1e51f952007-03-06 13:44:06 -08002553 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002554 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2555 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002556 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2557}
2558EXPORT_SYMBOL(compat_sock_common_getsockopt);
2559#endif
2560
Ying Xue1b784142015-03-02 15:37:48 +08002561int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2562 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563{
2564 struct sock *sk = sock->sk;
2565 int addr_len = 0;
2566 int err;
2567
Ying Xue1b784142015-03-02 15:37:48 +08002568 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 flags & ~MSG_DONTWAIT, &addr_len);
2570 if (err >= 0)
2571 msg->msg_namelen = addr_len;
2572 return err;
2573}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574EXPORT_SYMBOL(sock_common_recvmsg);
2575
2576/*
2577 * Set socket options on an inet socket.
2578 */
2579int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002580 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581{
2582 struct sock *sk = sock->sk;
2583
2584 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2585}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586EXPORT_SYMBOL(sock_common_setsockopt);
2587
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002588#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002589int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002590 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002591{
2592 struct sock *sk = sock->sk;
2593
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002594 if (sk->sk_prot->compat_setsockopt != NULL)
2595 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2596 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002597 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2598}
2599EXPORT_SYMBOL(compat_sock_common_setsockopt);
2600#endif
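/*
 * Illustrative sketch, not part of the original file: protocol families
 * that keep option and receive handling entirely in struct proto can plug
 * the generic wrappers above straight into their struct proto_ops
 * (the ops table below is hypothetical and deliberately incomplete).
 */
static const struct proto_ops example_dgram_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,	/* forwards to sk_prot->setsockopt */
	.getsockopt	= sock_common_getsockopt,	/* forwards to sk_prot->getsockopt */
	.recvmsg	= sock_common_recvmsg,		/* forwards to sk_prot->recvmsg */
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	/* remaining ops (release, bind, connect, sendmsg, ...) omitted */
};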
2601
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602void sk_common_release(struct sock *sk)
2603{
2604 if (sk->sk_prot->destroy)
2605 sk->sk_prot->destroy(sk);
2606
2607 /*
2608 * Observation: when sk_common_release is called, processes have
2609 * no access to the socket, but the network stack still does.
2610 * Step one, detach it from networking:
2611 *
2612 * A. Remove from hash tables.
2613 */
2614
2615 sk->sk_prot->unhash(sk);
2616
2617 /*
2618 * At this point the socket cannot receive new packets, but some may
2619 * still be in flight because another CPU ran the receiver and did its
2620 * hash table lookup before we unhashed the socket. They will reach the
2621 * receive queue and be purged by the socket destructor.
2622 *
2623 * We may also still have packets pending on the receive queue and,
2624 * probably, our own packets waiting in device queues. sock_destroy will
2625 * drain the receive queue, but transmitted packets will delay socket
2626 * destruction until the last reference is released.
2627 */
2628
2629 sock_orphan(sk);
2630
2631 xfrm_sk_free_policy(sk);
2632
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002633 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002634
2635 if (sk->sk_frag.page) {
2636 put_page(sk->sk_frag.page);
2637 sk->sk_frag.page = NULL;
2638 }
2639
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 sock_put(sk);
2641}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642EXPORT_SYMBOL(sk_common_release);
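/*
 * Illustrative sketch, not part of the original file: a datagram-style
 * protocol typically wires sk_common_release() into its struct proto
 * ->close() handler once protocol-private teardown is done (the function
 * name below is hypothetical).
 */
static void example_proto_close(struct sock *sk, long timeout)
{
	/* protocol specific cleanup (leave groups, flush options, ...) */
	sk_common_release(sk);	/* unhash, orphan and drop the final ref */
}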
2643
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002644#ifdef CONFIG_PROC_FS
2645#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002646struct prot_inuse {
2647 int val[PROTO_INUSE_NR];
2648};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002649
2650static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002651
2652#ifdef CONFIG_NET_NS
2653void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2654{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002655 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002656}
2657EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2658
2659int sock_prot_inuse_get(struct net *net, struct proto *prot)
2660{
2661 int cpu, idx = prot->inuse_idx;
2662 int res = 0;
2663
2664 for_each_possible_cpu(cpu)
2665 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2666
2667 return res >= 0 ? res : 0;
2668}
2669EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2670
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002671static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002672{
2673 net->core.inuse = alloc_percpu(struct prot_inuse);
2674 return net->core.inuse ? 0 : -ENOMEM;
2675}
2676
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002677static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002678{
2679 free_percpu(net->core.inuse);
2680}
2681
2682static struct pernet_operations net_inuse_ops = {
2683 .init = sock_inuse_init_net,
2684 .exit = sock_inuse_exit_net,
2685};
2686
2687static __init int net_inuse_init(void)
2688{
2689 if (register_pernet_subsys(&net_inuse_ops))
2690 panic("Cannot initialize net inuse counters");
2691
2692 return 0;
2693}
2694
2695core_initcall(net_inuse_init);
2696#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002697static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2698
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002699void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002700{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002701 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002702}
2703EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2704
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002705int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002706{
2707 int cpu, idx = prot->inuse_idx;
2708 int res = 0;
2709
2710 for_each_possible_cpu(cpu)
2711 res += per_cpu(prot_inuse, cpu).val[idx];
2712
2713 return res >= 0 ? res : 0;
2714}
2715EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002716#endif
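/*
 * Illustrative sketch, not part of the original file: protocols keep these
 * per-cpu counters accurate by calling sock_prot_inuse_add() from their
 * ->hash()/->unhash() callbacks, which is what feeds the "sockets" column
 * of /proc/net/protocols (function names below are hypothetical).
 */
static void example_proto_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_proto_unhash(struct sock *sk)
{
	/* ... remove sk from the protocol's lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}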
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002717
2718static void assign_proto_idx(struct proto *prot)
2719{
2720 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2721
2722 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002723 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002724 return;
2725 }
2726
2727 set_bit(prot->inuse_idx, proto_inuse_idx);
2728}
2729
2730static void release_proto_idx(struct proto *prot)
2731{
2732 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2733 clear_bit(prot->inuse_idx, proto_inuse_idx);
2734}
2735#else
2736static inline void assign_proto_idx(struct proto *prot)
2737{
2738}
2739
2740static inline void release_proto_idx(struct proto *prot)
2741{
2742}
2743#endif
2744
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002745static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2746{
2747 if (!rsk_prot)
2748 return;
2749 kfree(rsk_prot->slab_name);
2750 rsk_prot->slab_name = NULL;
2751 if (rsk_prot->slab) {
2752 kmem_cache_destroy(rsk_prot->slab);
2753 rsk_prot->slab = NULL;
2754 }
2755}
2756
2757static int req_prot_init(const struct proto *prot)
2758{
2759 struct request_sock_ops *rsk_prot = prot->rsk_prot;
2760
2761 if (!rsk_prot)
2762 return 0;
2763
2764 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2765 prot->name);
2766 if (!rsk_prot->slab_name)
2767 return -ENOMEM;
2768
2769 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2770 rsk_prot->obj_size, 0,
Eric Dumazetfa76ce732015-03-19 19:04:20 -07002771 0, NULL);
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002772
2773 if (!rsk_prot->slab) {
2774 pr_crit("%s: Can't create request sock SLAB cache!\n",
2775 prot->name);
2776 return -ENOMEM;
2777 }
2778 return 0;
2779}
2780
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781int proto_register(struct proto *prot, int alloc_slab)
2782{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 if (alloc_slab) {
2784 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002785 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2786 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787
2788 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002789 pr_crit("%s: Can't create sock SLAB cache!\n",
2790 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002791 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002793
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002794 if (req_prot_init(prot))
2795 goto out_free_request_sock_slab;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002796
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002797 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002798 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002799
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002800 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002801 goto out_free_request_sock_slab;
2802
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002803 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002804 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002805 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002806 0,
Eric Dumazet52db70d2015-04-10 06:07:18 -07002807 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002808 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002809 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002810 goto out_free_timewait_sock_slab_name;
2811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 }
2813
Glauber Costa36b77a52011-12-16 00:51:59 +00002814 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002816 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002817 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002818 return 0;
2819
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002820out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002821 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002822out_free_request_sock_slab:
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002823 req_prot_cleanup(prot->rsk_prot);
2824
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002825 kmem_cache_destroy(prot->slab);
2826 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002827out:
2828 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830EXPORT_SYMBOL(proto_register);
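/*
 * Illustrative sketch, not part of the original file: a minimal protocol
 * registers its struct proto once at module init and unregisters it on
 * exit; obj_size covers the protocol-private structure that embeds
 * struct sock (all names below are hypothetical).
 */
struct example_sock {
	struct sock	sk;		/* must be the first member */
	u32		example_state;	/* protocol private state */
};

static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_proto_init(void)
{
	/* alloc_slab = 1 requests a dedicated kmem cache for the sockets */
	return proto_register(&example_proto, 1);
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}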
2831
2832void proto_unregister(struct proto *prot)
2833{
Glauber Costa36b77a52011-12-16 00:51:59 +00002834 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002835 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002836 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002837 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
2839 if (prot->slab != NULL) {
2840 kmem_cache_destroy(prot->slab);
2841 prot->slab = NULL;
2842 }
2843
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002844 req_prot_cleanup(prot->rsk_prot);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002845
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002846 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002847 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002848 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002849 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002850 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852EXPORT_SYMBOL(proto_unregister);
2853
2854#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002856 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857{
Glauber Costa36b77a52011-12-16 00:51:59 +00002858 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002859 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860}
2861
2862static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2863{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002864 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865}
2866
2867static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002868 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869{
Glauber Costa36b77a52011-12-16 00:51:59 +00002870 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871}
2872
2873static char proto_method_implemented(const void *method)
2874{
2875 return method == NULL ? 'n' : 'y';
2876}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002877static long sock_prot_memory_allocated(struct proto *proto)
2878{
Jeffrin Josecb75a362012-04-25 19:17:29 +05302879 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002880}
2881
2882static char *sock_prot_memory_pressure(struct proto *proto)
2883{
2884 return proto->memory_pressure != NULL ?
2885 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2886}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
2888static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2889{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002890
Eric Dumazet8d987e52010-11-09 23:24:26 +00002891 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2893 proto->name,
2894 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002895 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00002896 sock_prot_memory_allocated(proto),
2897 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 proto->max_header,
2899 proto->slab == NULL ? "no" : "yes",
2900 module_name(proto->owner),
2901 proto_method_implemented(proto->close),
2902 proto_method_implemented(proto->connect),
2903 proto_method_implemented(proto->disconnect),
2904 proto_method_implemented(proto->accept),
2905 proto_method_implemented(proto->ioctl),
2906 proto_method_implemented(proto->init),
2907 proto_method_implemented(proto->destroy),
2908 proto_method_implemented(proto->shutdown),
2909 proto_method_implemented(proto->setsockopt),
2910 proto_method_implemented(proto->getsockopt),
2911 proto_method_implemented(proto->sendmsg),
2912 proto_method_implemented(proto->recvmsg),
2913 proto_method_implemented(proto->sendpage),
2914 proto_method_implemented(proto->bind),
2915 proto_method_implemented(proto->backlog_rcv),
2916 proto_method_implemented(proto->hash),
2917 proto_method_implemented(proto->unhash),
2918 proto_method_implemented(proto->get_port),
2919 proto_method_implemented(proto->enter_memory_pressure));
2920}
2921
2922static int proto_seq_show(struct seq_file *seq, void *v)
2923{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002924 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2926 "protocol",
2927 "size",
2928 "sockets",
2929 "memory",
2930 "press",
2931 "maxhdr",
2932 "slab",
2933 "module",
2934 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2935 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002936 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 return 0;
2938}
2939
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002940static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 .start = proto_seq_start,
2942 .next = proto_seq_next,
2943 .stop = proto_seq_stop,
2944 .show = proto_seq_show,
2945};
2946
2947static int proto_seq_open(struct inode *inode, struct file *file)
2948{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002949 return seq_open_net(inode, file, &proto_seq_ops,
2950 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951}
2952
Arjan van de Ven9a321442007-02-12 00:55:35 -08002953static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 .owner = THIS_MODULE,
2955 .open = proto_seq_open,
2956 .read = seq_read,
2957 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002958 .release = seq_release_net,
2959};
2960
2961static __net_init int proto_init_net(struct net *net)
2962{
Gao fengd4beaa62013-02-18 01:34:54 +00002963 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
Eric Dumazet14e943d2008-11-19 15:14:01 -08002964 return -ENOMEM;
2965
2966 return 0;
2967}
2968
2969static __net_exit void proto_exit_net(struct net *net)
2970{
Gao fengece31ff2013-02-18 01:34:56 +00002971 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08002972}
2973
2974
2975static __net_initdata struct pernet_operations proto_net_ops = {
2976 .init = proto_init_net,
2977 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978};
2979
2980static int __init proto_init(void)
2981{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002982 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983}
2984
2985subsys_initcall(proto_init);
2986
2987#endif /* PROC_FS */