/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and if the current process has the capability
 * @cap in the user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and if the current process has the capability
 * @cap in all user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and if the current process has the capability
 * @cap over the network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down, but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
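
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * a swap-over-network transport might mark its socket so allocations
 * on the writeback path can dip into emergency reserves.
 * "example_swap_sock_init" is a hypothetical name.
 */
#if 0
static void example_swap_sock_init(struct sock *sk)
{
	sk_set_memalloc(sk);	/* sets SOCK_MEMALLOC and __GFP_MEMALLOC */
	/* ... perform swap I/O over sk ...; then, on teardown only: */
	sk_clear_memalloc(sk);
}
#endif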

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
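
/*
 * Userspace view (illustrative only, kept out of the build with #if 0)
 * of the parsing above: a zero timeval means "wait forever", and
 * sub-jiffy microsecond values are rounded up to a whole jiffy.
 * "fd" is a hypothetical socket descriptor.
 */
#if 0
	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };

	/* 2.5 second receive timeout, converted by sock_set_timeout() */
	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
#endif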

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* We escape from the RCU-protected region here; make sure we
	 * don't leak a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
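
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * a typical protocol receive path queues the skb to the owning socket
 * and frees it on failure.  "example_proto_rcv" is a hypothetical name.
 */
#if 0
static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);	/* the drop is already counted in sk_drops */
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}
#endif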
476
Arnaldo Carvalho de Melo58a5a7b2006-11-16 14:06:06 -0200477int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800478{
479 int rc = NET_RX_SUCCESS;
480
Dmitry Mishinfda9ef52006-08-31 15:28:39 -0700481 if (sk_filter(sk, skb))
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800482 goto discard_and_relse;
483
484 skb->dev = NULL;
485
Sorin Dumitru274f4822014-07-22 21:16:51 +0300486 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
Eric Dumazetc3774112010-04-27 15:13:20 -0700487 atomic_inc(&sk->sk_drops);
488 goto discard_and_relse;
489 }
Arnaldo Carvalho de Melo58a5a7b2006-11-16 14:06:06 -0200490 if (nested)
491 bh_lock_sock_nested(sk);
492 else
493 bh_lock_sock(sk);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -0700494 if (!sock_owned_by_user(sk)) {
495 /*
496 * trylock + unlock semantics:
497 */
498 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
499
Peter Zijlstrac57943a2008-10-07 14:18:42 -0700500 rc = sk_backlog_rcv(sk, skb);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -0700501
502 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
Eric Dumazetf545a382012-04-22 23:34:26 +0000503 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
Zhu Yi8eae9392010-03-04 18:01:40 +0000504 bh_unlock_sock(sk);
505 atomic_inc(&sk->sk_drops);
506 goto discard_and_relse;
507 }
508
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800509 bh_unlock_sock(sk);
510out:
511 sock_put(sk);
512 return rc;
513discard_and_relse:
514 kfree_skb(skb);
515 goto out;
516}
517EXPORT_SYMBOL(sk_receive_skb);
518
519struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
520{
Eric Dumazetb6c67122010-04-08 23:03:29 +0000521 struct dst_entry *dst = __sk_dst_get(sk);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800522
523 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
Krishna Kumare022f0b2009-10-19 23:46:20 +0000524 sk_tx_queue_clear(sk);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +0000525 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800526 dst_release(dst);
527 return NULL;
528 }
529
530 return dst;
531}
532EXPORT_SYMBOL(__sk_dst_check);
533
534struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
535{
536 struct dst_entry *dst = sk_dst_get(sk);
537
538 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
539 sk_dst_reset(sk);
540 dst_release(dst);
541 return NULL;
542 }
543
544 return dst;
545}
546EXPORT_SYMBOL(sk_dst_check);
547
Brian Haleyc91f6df2012-11-26 05:21:08 +0000548static int sock_setbindtodevice(struct sock *sk, char __user *optval,
549 int optlen)
David S. Miller48788092007-09-14 16:41:03 -0700550{
551 int ret = -ENOPROTOOPT;
552#ifdef CONFIG_NETDEVICES
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +0900553 struct net *net = sock_net(sk);
David S. Miller48788092007-09-14 16:41:03 -0700554 char devname[IFNAMSIZ];
555 int index;
556
557 /* Sorry... */
558 ret = -EPERM;
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000559 if (!ns_capable(net->user_ns, CAP_NET_RAW))
David S. Miller48788092007-09-14 16:41:03 -0700560 goto out;
561
562 ret = -EINVAL;
563 if (optlen < 0)
564 goto out;
565
566 /* Bind this socket to a particular device like "eth0",
567 * as specified in the passed interface name. If the
568 * name is "" or the option length is zero the socket
569 * is not bound.
570 */
571 if (optlen > IFNAMSIZ - 1)
572 optlen = IFNAMSIZ - 1;
573 memset(devname, 0, sizeof(devname));
574
575 ret = -EFAULT;
576 if (copy_from_user(devname, optval, optlen))
577 goto out;
578
David S. Miller000ba2e2009-11-05 22:37:11 -0800579 index = 0;
580 if (devname[0] != '\0') {
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800581 struct net_device *dev;
David S. Miller48788092007-09-14 16:41:03 -0700582
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800583 rcu_read_lock();
584 dev = dev_get_by_name_rcu(net, devname);
585 if (dev)
586 index = dev->ifindex;
587 rcu_read_unlock();
David S. Miller48788092007-09-14 16:41:03 -0700588 ret = -ENODEV;
589 if (!dev)
590 goto out;
David S. Miller48788092007-09-14 16:41:03 -0700591 }
592
593 lock_sock(sk);
594 sk->sk_bound_dev_if = index;
595 sk_dst_reset(sk);
596 release_sock(sk);
597
598 ret = 0;
599
600out:
601#endif
602
603 return ret;
604}
605
Brian Haleyc91f6df2012-11-26 05:21:08 +0000606static int sock_getbindtodevice(struct sock *sk, char __user *optval,
607 int __user *optlen, int len)
608{
609 int ret = -ENOPROTOOPT;
610#ifdef CONFIG_NETDEVICES
611 struct net *net = sock_net(sk);
Brian Haleyc91f6df2012-11-26 05:21:08 +0000612 char devname[IFNAMSIZ];
Brian Haleyc91f6df2012-11-26 05:21:08 +0000613
614 if (sk->sk_bound_dev_if == 0) {
615 len = 0;
616 goto zero;
617 }
618
619 ret = -EINVAL;
620 if (len < IFNAMSIZ)
621 goto out;
622
Nicolas Schichan5dbe7c12013-06-26 17:23:42 +0200623 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
624 if (ret)
Brian Haleyc91f6df2012-11-26 05:21:08 +0000625 goto out;
Brian Haleyc91f6df2012-11-26 05:21:08 +0000626
627 len = strlen(devname) + 1;
628
629 ret = -EFAULT;
630 if (copy_to_user(optval, devname, len))
631 goto out;
632
633zero:
634 ret = -EFAULT;
635 if (put_user(len, optlen))
636 goto out;
637
638 ret = 0;
639
640out:
641#endif
642
643 return ret;
644}
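
/*
 * Userspace view (illustrative only, kept out of the build with #if 0)
 * of the two helpers above.  Setting requires CAP_NET_RAW; an empty
 * name unbinds the socket.  "fd" is a hypothetical socket descriptor.
 */
#if 0
	char ifname[IFNAMSIZ] = "eth0";
	socklen_t len = sizeof(ifname);

	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, strlen(ifname));
	getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, &len);
#endif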

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it, this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it, this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP) {
				if (sk->sk_state != TCP_ESTABLISHED) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement SO_SNDLOWAT etc. as not settable
		 * (1003.1g 5.3).
		 */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
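
/*
 * Userspace view (illustrative only, kept out of the build with #if 0):
 * the doubling documented in the SO_RCVBUF case above is visible to
 * applications; reading the option back returns twice the requested
 * size, clamped by sysctl_rmem_max and SOCK_MIN_RCVBUF.  "fd" is a
 * hypothetical socket descriptor.
 */
#if 0
	int val = 65536, out;
	socklen_t len = sizeof(out);

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
	/* out is now 131072: the kernel stored val * 2 */
#endif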


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
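
/*
 * Userspace view (illustrative only, kept out of the build with #if 0):
 * the SO_PEERCRED branch above services the classic credentials query
 * on a connected AF_UNIX socket.  "fd" is a hypothetical descriptor.
 */
#if 0
	struct ucred peer;
	socklen_t len = sizeof(peer);

	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
		printf("peer pid=%d uid=%d gid=%d\n",
		       peer.pid, peer.uid, peer.gid);
#endif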

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
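
/*
 * Layout note (an added summary of the function above): the object is
 * zeroed in three ranges, skipping one pointer-sized word at each of
 * the two "nulls" node offsets (taken in address order) so their NULLS
 * markers survive concurrent RCU lookups:
 *
 *	[0, nulls1)                        zeroed
 *	[nulls1, nulls1 + sizeof(void *))  skc_node.next preserved
 *	[nulls1 + sizeof(void *), nulls2)  zeroed
 *	[nulls2, nulls2 + sizeof(void *))  skc_portaddr_node.next preserved
 *	[nulls2 + sizeof(void *), size)    zeroed
 */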

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001382 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001384 sk->sk_family = family;
1385 /*
1386 * See comment in struct sock definition to understand
1387 * why we need sk_prot_creator -acme
1388 */
1389 sk->sk_prot = sk->sk_prot_creator = prot;
1390 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001391 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001392 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001393
Zefan Li211d2f972013-04-08 20:03:35 +00001394 sock_update_classid(sk);
Zefan Li6ffd4642013-04-08 20:03:47 +00001395 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 }
Frank Filza79af592005-09-27 15:23:38 -07001397
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001398 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399}
Eric Dumazet2a915252009-05-27 11:30:05 +00001400EXPORT_SYMBOL(sk_alloc);
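
/*
 * Usage sketch (hypothetical, not part of this file): a protocol's
 * ->create() handler typically pairs sk_alloc() with sock_init_data().
 * "example_proto" stands in for the protocol's struct proto.
 */
static int example_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &example_proto);
	if (!sk)
		return -ENOBUFS;
	sock_init_data(sock, sk);
	sk->sk_protocol = protocol;
	return 0;
}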
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
Eric Dumazet2b85a342009-06-11 02:55:43 -07001402static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403{
1404 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
1406 if (sk->sk_destruct)
1407 sk->sk_destruct(sk);
1408
Paul E. McKenneya898def2010-02-22 17:04:49 -08001409 filter = rcu_dereference_check(sk->sk_filter,
1410 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001412 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001413 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 }
1415
Eric Dumazet08e29af2011-11-28 12:04:18 +00001416 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
1418 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001419 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1420 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001422 if (sk->sk_peer_cred)
1423 put_cred(sk->sk_peer_cred);
1424 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001425 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001426 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001428
1429void sk_free(struct sock *sk)
1430{
1431 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001432 * By subtracting one from sk_wmem_alloc we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001433 * some packets are still in some tx queue.
 1434 * If the count is not yet zero, sock_wfree() will call __sk_free(sk) later.
1435 */
1436 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1437 __sk_free(sk);
1438}
Eric Dumazet2a915252009-05-27 11:30:05 +00001439EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
Denis V. Lunevedf02082008-02-29 11:18:32 -08001441/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001442 * The last sock_put should drop the reference to sk->sk_net. It has already
 1443 * been dropped in sk_change_net. Taking a reference to the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001444 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001445 * Take a reference to the socket to remove it from the hash _alive_ and after
Denis V. Lunevedf02082008-02-29 11:18:32 -08001446 * that destroy it in the context of init_net.
1447 */
1448void sk_release_kernel(struct sock *sk)
1449{
1450 if (sk == NULL || sk->sk_socket == NULL)
1451 return;
1452
1453 sock_hold(sk);
1454 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001455 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001456 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001457 sock_put(sk);
1458}
David S. Miller45af1752008-02-29 11:33:19 -08001459EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001460
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001461static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1462{
1463 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1464 sock_update_memcg(newsk);
1465}
1466
Eric Dumazete56c57d2011-11-08 17:07:07 -05001467/**
1468 * sk_clone_lock - clone a socket, and lock its clone
1469 * @sk: the socket to clone
1470 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1471 *
1472 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1473 */
1474struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001475{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001476 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001477 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001478
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001479 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001480 if (newsk != NULL) {
1481 struct sk_filter *filter;
1482
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001483 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001484
1485 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001486 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001487 sk_node_init(&newsk->sk_node);
1488 sock_lock_init(newsk);
1489 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001490 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001491 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001492
1493 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001494 /*
1495 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1496 */
1497 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001498 atomic_set(&newsk->sk_omem_alloc, 0);
1499 skb_queue_head_init(&newsk->sk_receive_queue);
1500 skb_queue_head_init(&newsk->sk_write_queue);
1501
Eric Dumazetb6c67122010-04-08 23:03:29 +00001502 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001503 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef0e2007-07-19 01:49:00 -07001504 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1505 af_callback_keys + newsk->sk_family,
1506 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001507
1508 newsk->sk_dst_cache = NULL;
1509 newsk->sk_wmem_queued = 0;
1510 newsk->sk_forward_alloc = 0;
1511 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001512 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1513
1514 sock_reset_flag(newsk, SOCK_DONE);
1515 skb_queue_head_init(&newsk->sk_error_queue);
1516
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001517 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001518 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001519 /* though it's an empty new sock, the charging may fail
 1520 * if sysctl_optmem_max was changed between the creation of
 1521 * the original socket and the cloning
1522 */
1523 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001524
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001525 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001526 /* It is still a raw copy of the parent, so invalidate
 1527 * the destructor and do a plain sk_free() */
1528 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001529 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001530 sk_free(newsk);
1531 newsk = NULL;
1532 goto out;
1533 }
1534
1535 newsk->sk_err = 0;
1536 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001537 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001538 /*
1539 * Before updating sk_refcnt, we must commit prior changes to memory
1540 * (Documentation/RCU/rculist_nulls.txt for details)
1541 */
1542 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001543 atomic_set(&newsk->sk_refcnt, 2);
1544
1545 /*
1546 * Increment the counter in the same struct proto as the master
1547 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1548 * is the same as sk->sk_prot->socks, as this field was copied
1549 * with memcpy).
1550 *
1551 * This _changes_ the previous behaviour, where
 1552 * tcp_create_openreq_child always incremented the
 1553 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
 1554 * to be taken into account by all callers. -acme
1555 */
1556 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001557 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001558 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001559
Glauber Costaf3f511e2012-01-05 20:16:39 +00001560 sk_update_clone(sk, newsk);
1561
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001562 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001563 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001564
Eric Dumazet08e29af2011-11-28 12:04:18 +00001565 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001566 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001567 }
1568out:
1569 return newsk;
1570}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001571EXPORT_SYMBOL_GPL(sk_clone_lock);
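
/*
 * Usage sketch (hypothetical): the clone comes back bh-locked and the
 * caller owns the unlock, as in an accept-style path.
 */
static struct sock *example_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (newsk) {
		/* ... protocol-specific fixups on newsk ... */
		bh_unlock_sock(newsk);
	}
	return newsk;
}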
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001572
Andi Kleen99580892007-04-20 17:12:43 -07001573void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1574{
1575 __sk_dst_set(sk, dst);
1576 sk->sk_route_caps = dst->dev->features;
1577 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001578 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001579 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001580 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001581 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001582 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001583 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001584 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001585 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001586 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001587 }
Andi Kleen99580892007-04-20 17:12:43 -07001588 }
1589}
1590EXPORT_SYMBOL_GPL(sk_setup_caps);
1591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592/*
1593 * Simple resource managers for sockets.
1594 */
1595
1596
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001597/*
1598 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 */
1600void sock_wfree(struct sk_buff *skb)
1601{
1602 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001603 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Eric Dumazetd99927f2009-09-24 10:49:24 +00001605 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1606 /*
 1607 * Keep a reference on sk_wmem_alloc; it will be released
 1608 * after the sk_write_space() call.
1609 */
1610 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001612 len = 1;
1613 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001614 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001615 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1616 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001617 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001618 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001619 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620}
Eric Dumazet2a915252009-05-27 11:30:05 +00001621EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001623void skb_orphan_partial(struct sk_buff *skb)
1624{
 1625 /* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
 1626 * so we do not completely orphan the skb, but transfer all
 1627 * accounted bytes but one, to avoid unexpected reorders.
1628 */
1629 if (skb->destructor == sock_wfree
1630#ifdef CONFIG_INET
1631 || skb->destructor == tcp_wfree
1632#endif
1633 ) {
1634 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1635 skb->truesize = 1;
1636 } else {
1637 skb_orphan(skb);
1638 }
1639}
1640EXPORT_SYMBOL(skb_orphan_partial);
1641
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001642/*
1643 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 */
1645void sock_rfree(struct sk_buff *skb)
1646{
1647 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001648 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
Eric Dumazetd361fd52010-07-10 22:45:17 +00001650 atomic_sub(len, &sk->sk_rmem_alloc);
1651 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652}
Eric Dumazet2a915252009-05-27 11:30:05 +00001653EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
Alexander Duyck62bccb82014-09-04 13:31:35 -04001655void sock_efree(struct sk_buff *skb)
1656{
1657 sock_put(skb->sk);
1658}
1659EXPORT_SYMBOL(sock_efree);
1660
Alexander Duyck82eabd92014-09-04 13:32:11 -04001661#ifdef CONFIG_INET
David S. Miller41063e92012-06-19 21:22:05 -07001662void sock_edemux(struct sk_buff *skb)
1663{
Eric Dumazete8123472012-09-02 23:57:18 +00001664 struct sock *sk = skb->sk;
1665
1666 if (sk->sk_state == TCP_TIME_WAIT)
1667 inet_twsk_put(inet_twsk(sk));
1668 else
1669 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001670}
1671EXPORT_SYMBOL(sock_edemux);
Alexander Duyck82eabd92014-09-04 13:32:11 -04001672#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Eric W. Biederman976d02012012-05-23 17:16:53 -06001674kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001676 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Eric Dumazetf064af12010-09-22 12:43:39 +00001678 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001679 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001680 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 return uid;
1682}
Eric Dumazet2a915252009-05-27 11:30:05 +00001683EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
1685unsigned long sock_i_ino(struct sock *sk)
1686{
1687 unsigned long ino;
1688
Eric Dumazetf064af12010-09-22 12:43:39 +00001689 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001691 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 return ino;
1693}
Eric Dumazet2a915252009-05-27 11:30:05 +00001694EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696/*
 1697 * Allocate an skb from the socket's send buffer.
1698 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001699struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001700 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701{
1702 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001703 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 if (skb) {
1705 skb_set_owner_w(skb, sk);
1706 return skb;
1707 }
1708 }
1709 return NULL;
1710}
Eric Dumazet2a915252009-05-27 11:30:05 +00001711EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
1713/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001715 */
Al Virodd0fc662005-10-07 07:46:04 +01001716void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717{
Eric Dumazet95c96172012-04-15 05:58:06 +00001718 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1720 void *mem;
 1721 /* First do the add, to avoid a race in case
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001722 * kmalloc sleeps.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 */
1724 atomic_add(size, &sk->sk_omem_alloc);
1725 mem = kmalloc(size, priority);
1726 if (mem)
1727 return mem;
1728 atomic_sub(size, &sk->sk_omem_alloc);
1729 }
1730 return NULL;
1731}
Eric Dumazet2a915252009-05-27 11:30:05 +00001732EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
1734/*
1735 * Free an option memory block.
1736 */
1737void sock_kfree_s(struct sock *sk, void *mem, int size)
1738{
David S. Millere53da5f2014-10-14 17:02:37 -04001739 if (WARN_ON_ONCE(!mem))
1740 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 kfree(mem);
1742 atomic_sub(size, &sk->sk_omem_alloc);
1743}
Eric Dumazet2a915252009-05-27 11:30:05 +00001744EXPORT_SYMBOL(sock_kfree_s);
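
/*
 * Usage sketch (hypothetical): memory obtained with sock_kmalloc() is
 * charged to sk_omem_alloc and must be released with sock_kfree_s()
 * using the same size.
 */
static int example_store_opt(struct sock *sk, const void __user *uopt, int len)
{
	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!buf)
		return -ENOBUFS;
	if (copy_from_user(buf, uopt, len)) {
		sock_kfree_s(sk, buf, len);
		return -EFAULT;
	}
	/* ... attach buf to the socket; free later via sock_kfree_s() ... */
	return 0;
}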
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
1746/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1747 I think these locks should be removed for datagram sockets.
1748 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001749static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750{
1751 DEFINE_WAIT(wait);
1752
1753 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1754 for (;;) {
1755 if (!timeo)
1756 break;
1757 if (signal_pending(current))
1758 break;
1759 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001760 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1762 break;
1763 if (sk->sk_shutdown & SEND_SHUTDOWN)
1764 break;
1765 if (sk->sk_err)
1766 break;
1767 timeo = schedule_timeout(timeo);
1768 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001769 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 return timeo;
1771}
1772
1773
1774/*
1775 * Generic send/receive buffer handlers
1776 */
1777
Herbert Xu4cc7f682009-02-04 16:55:54 -08001778struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1779 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001780 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001782 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 long timeo;
1784 int err;
1785
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001787 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 err = sock_error(sk);
1789 if (err != 0)
1790 goto failure;
1791
1792 err = -EPIPE;
1793 if (sk->sk_shutdown & SEND_SHUTDOWN)
1794 goto failure;
1795
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001796 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1797 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001798
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001799 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1800 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1801 err = -EAGAIN;
1802 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001804 if (signal_pending(current))
1805 goto interrupted;
1806 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001808 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1809 errcode, sk->sk_allocation);
1810 if (skb)
1811 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 return skb;
1813
1814interrupted:
1815 err = sock_intr_errno(timeo);
1816failure:
1817 *errcode = err;
1818 return NULL;
1819}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001820EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001822struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 int noblock, int *errcode)
1824{
Eric Dumazet28d64272013-08-08 14:38:47 -07001825 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826}
Eric Dumazet2a915252009-05-27 11:30:05 +00001827EXPORT_SYMBOL(sock_alloc_send_skb);
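
/*
 * Usage sketch (hypothetical): a datagram send path allocating a buffer
 * charged to the socket's send buffer, blocking if allowed.
 */
static struct sk_buff *example_alloc_skb(struct sock *sk, size_t len,
					 int noblock, int *err)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len + MAX_HEADER, noblock, err);
	if (skb)
		skb_reserve(skb, MAX_HEADER);
	return skb;
}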
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
Eric Dumazet5640f762012-09-23 23:04:42 +00001829/* On 32bit arches, an skb frag is limited to 2^15 */
1830#define SKB_FRAG_PAGE_ORDER get_order(32768)
1831
Eric Dumazet400dfd32013-10-17 16:27:07 -07001832/**
1833 * skb_page_frag_refill - check that a page_frag contains enough room
1834 * @sz: minimum size of the fragment we want to get
1835 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001836 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001837 *
1838 * Note: While this allocator tries to use high order pages, there is
1839 * no guarantee that allocations succeed. Therefore, @sz MUST be
 1840 * less than or equal to PAGE_SIZE.
1841 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001842bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001843{
Eric Dumazet5640f762012-09-23 23:04:42 +00001844 if (pfrag->page) {
1845 if (atomic_read(&pfrag->page->_count) == 1) {
1846 pfrag->offset = 0;
1847 return true;
1848 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001849 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001850 return true;
1851 put_page(pfrag->page);
1852 }
1853
Eric Dumazetd9b29382014-08-27 20:49:34 -07001854 pfrag->offset = 0;
1855 if (SKB_FRAG_PAGE_ORDER) {
1856 pfrag->page = alloc_pages(gfp | __GFP_COMP |
1857 __GFP_NOWARN | __GFP_NORETRY,
1858 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00001859 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07001860 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00001861 return true;
1862 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07001863 }
1864 pfrag->page = alloc_page(gfp);
1865 if (likely(pfrag->page)) {
1866 pfrag->size = PAGE_SIZE;
1867 return true;
1868 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001869 return false;
1870}
1871EXPORT_SYMBOL(skb_page_frag_refill);
1872
1873bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1874{
1875 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1876 return true;
1877
Eric Dumazet5640f762012-09-23 23:04:42 +00001878 sk_enter_memory_pressure(sk);
1879 sk_stream_moderate_sndbuf(sk);
1880 return false;
1881}
1882EXPORT_SYMBOL(sk_page_frag_refill);
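
/*
 * Usage sketch (hypothetical): a sendmsg fast path copying user data into
 * the per-socket page_frag before attaching it to an skb.
 */
static int example_fill_frag(struct sock *sk, struct iov_iter *from, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;
	if (copy_page_from_iter(pfrag->page, pfrag->offset, copy, from) != copy)
		return -EFAULT;
	pfrag->offset += copy;
	return 0;
}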
1883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001885 __releases(&sk->sk_lock.slock)
1886 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887{
1888 DEFINE_WAIT(wait);
1889
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001890 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1892 TASK_UNINTERRUPTIBLE);
1893 spin_unlock_bh(&sk->sk_lock.slock);
1894 schedule();
1895 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001896 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 break;
1898 }
1899 finish_wait(&sk->sk_lock.wq, &wait);
1900}
1901
1902static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001903 __releases(&sk->sk_lock.slock)
1904 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905{
1906 struct sk_buff *skb = sk->sk_backlog.head;
1907
1908 do {
1909 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1910 bh_unlock_sock(sk);
1911
1912 do {
1913 struct sk_buff *next = skb->next;
1914
Eric Dumazete4cbb022012-04-30 16:07:09 +00001915 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001916 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001918 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
1920 /*
1921 * We are in process context here with softirqs
1922 * disabled, use cond_resched_softirq() to preempt.
1923 * This is safe to do because we've taken the backlog
1924 * queue private:
1925 */
1926 cond_resched_softirq();
1927
1928 skb = next;
1929 } while (skb != NULL);
1930
1931 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001932 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001933
1934 /*
 1935 * Doing the zeroing here guarantees we cannot loop forever
1936 * while a wild producer attempts to flood us.
1937 */
1938 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939}
1940
1941/**
1942 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001943 * @sk: sock to wait on
1944 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 *
 1946 * Now the socket state, including sk->sk_err, is changed only under the lock,
 1947 * hence we may omit checks after joining the wait queue.
 1948 * We check the receive queue before schedule() only as an optimization;
 1949 * it is very likely that release_sock() added new data.
1950 */
1951int sk_wait_data(struct sock *sk, long *timeo)
1952{
1953 int rc;
1954 DEFINE_WAIT(wait);
1955
Eric Dumazetaa395142010-04-20 13:03:51 +00001956 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1958 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1959 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001960 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 return rc;
1962}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963EXPORT_SYMBOL(sk_wait_data);
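
/*
 * Usage sketch (hypothetical): a recvmsg() path, called with the socket
 * locked, blocking until data arrives or the timeout expires.
 */
static int example_wait_for_data(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		sk_wait_data(sk, &timeo);
	}
	return 0;
}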
1964
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001965/**
1966 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1967 * @sk: socket
1968 * @size: memory size to allocate
1969 * @kind: allocation type
1970 *
1971 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1972 * rmem allocation. This function assumes that protocols which have
1973 * memory_pressure use sk_wmem_queued as write buffer accounting.
1974 */
1975int __sk_mem_schedule(struct sock *sk, int size, int kind)
1976{
1977 struct proto *prot = sk->sk_prot;
1978 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001979 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001980 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001981
1982 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001983
Glauber Costae1aab162011-12-11 21:47:03 +00001984 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001985
1986 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001987 if (parent_status == UNDER_LIMIT &&
1988 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001989 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001990 return 1;
1991 }
1992
Glauber Costae1aab162011-12-11 21:47:03 +00001993 /* Under pressure. (we or our parents) */
1994 if ((parent_status > SOFT_LIMIT) ||
1995 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001996 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001997
Glauber Costae1aab162011-12-11 21:47:03 +00001998 /* Over hard limit (we or our parents) */
1999 if ((parent_status == OVER_LIMIT) ||
2000 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002001 goto suppress_allocation;
2002
2003 /* guarantee minimum buffer size under pressure */
2004 if (kind == SK_MEM_RECV) {
2005 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2006 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002007
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002008 } else { /* SK_MEM_SEND */
2009 if (sk->sk_type == SOCK_STREAM) {
2010 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2011 return 1;
2012 } else if (atomic_read(&sk->sk_wmem_alloc) <
2013 prot->sysctl_wmem[0])
2014 return 1;
2015 }
2016
Glauber Costa180d8cd2011-12-11 21:47:02 +00002017 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08002018 int alloc;
2019
Glauber Costa180d8cd2011-12-11 21:47:02 +00002020 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002021 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002022 alloc = sk_sockets_allocated_read_positive(sk);
2023 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002024 sk_mem_pages(sk->sk_wmem_queued +
2025 atomic_read(&sk->sk_rmem_alloc) +
2026 sk->sk_forward_alloc))
2027 return 1;
2028 }
2029
2030suppress_allocation:
2031
2032 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2033 sk_stream_moderate_sndbuf(sk);
2034
2035 /* Fail only if socket is _under_ its sndbuf.
 2036 * In this case we cannot block, so we have to fail.
2037 */
2038 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2039 return 1;
2040 }
2041
Satoru Moriya3847ce32011-06-17 12:00:03 +00002042 trace_sock_exceed_buf_limit(sk, prot, allocated);
2043
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002044 /* Alas. Undo changes. */
2045 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002046
Glauber Costa0e90b312012-01-20 04:57:16 +00002047 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002048
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002049 return 0;
2050}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002051EXPORT_SYMBOL(__sk_mem_schedule);
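
/*
 * Usage sketch (hypothetical): how a helper in the style of
 * sk_wmem_schedule() builds on __sk_mem_schedule().
 */
static inline bool example_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))	/* protocol without memory accounting */
		return true;
	return size <= sk->sk_forward_alloc ||
	       __sk_mem_schedule(sk, size, SK_MEM_SEND);
}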
2052
2053/**
 2054 * __sk_mem_reclaim - reclaim memory_allocated
2055 * @sk: socket
2056 */
2057void __sk_mem_reclaim(struct sock *sk)
2058{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002059 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00002060 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002061 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2062
Glauber Costa180d8cd2011-12-11 21:47:02 +00002063 if (sk_under_memory_pressure(sk) &&
2064 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2065 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002066}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002067EXPORT_SYMBOL(__sk_mem_reclaim);
2068
2069
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070/*
2071 * Set of default routines for initialising struct proto_ops when
2072 * the protocol does not support a particular function. In certain
2073 * cases where it makes no sense for a protocol to have a "do nothing"
2074 * function, some default processing is provided.
2075 */
2076
2077int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2078{
2079 return -EOPNOTSUPP;
2080}
Eric Dumazet2a915252009-05-27 11:30:05 +00002081EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002083int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 int len, int flags)
2085{
2086 return -EOPNOTSUPP;
2087}
Eric Dumazet2a915252009-05-27 11:30:05 +00002088EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2091{
2092 return -EOPNOTSUPP;
2093}
Eric Dumazet2a915252009-05-27 11:30:05 +00002094EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
2096int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2097{
2098 return -EOPNOTSUPP;
2099}
Eric Dumazet2a915252009-05-27 11:30:05 +00002100EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002102int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 int *len, int peer)
2104{
2105 return -EOPNOTSUPP;
2106}
Eric Dumazet2a915252009-05-27 11:30:05 +00002107EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
Eric Dumazet2a915252009-05-27 11:30:05 +00002109unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110{
2111 return 0;
2112}
Eric Dumazet2a915252009-05-27 11:30:05 +00002113EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
2115int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2116{
2117 return -EOPNOTSUPP;
2118}
Eric Dumazet2a915252009-05-27 11:30:05 +00002119EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
2121int sock_no_listen(struct socket *sock, int backlog)
2122{
2123 return -EOPNOTSUPP;
2124}
Eric Dumazet2a915252009-05-27 11:30:05 +00002125EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127int sock_no_shutdown(struct socket *sock, int how)
2128{
2129 return -EOPNOTSUPP;
2130}
Eric Dumazet2a915252009-05-27 11:30:05 +00002131EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
2133int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002134 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135{
2136 return -EOPNOTSUPP;
2137}
Eric Dumazet2a915252009-05-27 11:30:05 +00002138EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139
2140int sock_no_getsockopt(struct socket *sock, int level, int optname,
2141 char __user *optval, int __user *optlen)
2142{
2143 return -EOPNOTSUPP;
2144}
Eric Dumazet2a915252009-05-27 11:30:05 +00002145EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
2147int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2148 size_t len)
2149{
2150 return -EOPNOTSUPP;
2151}
Eric Dumazet2a915252009-05-27 11:30:05 +00002152EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153
2154int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2155 size_t len, int flags)
2156{
2157 return -EOPNOTSUPP;
2158}
Eric Dumazet2a915252009-05-27 11:30:05 +00002159EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
2161int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2162{
2163 /* Mirror missing mmap method error code */
2164 return -ENODEV;
2165}
Eric Dumazet2a915252009-05-27 11:30:05 +00002166EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
2168ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2169{
2170 ssize_t res;
2171 struct msghdr msg = {.msg_flags = flags};
2172 struct kvec iov;
2173 char *kaddr = kmap(page);
2174 iov.iov_base = kaddr + offset;
2175 iov.iov_len = size;
2176 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2177 kunmap(page);
2178 return res;
2179}
Eric Dumazet2a915252009-05-27 11:30:05 +00002180EXPORT_SYMBOL(sock_no_sendpage);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
2182/*
2183 * Default Socket Callbacks
2184 */
2185
2186static void sock_def_wakeup(struct sock *sk)
2187{
Eric Dumazet43815482010-04-29 11:01:49 +00002188 struct socket_wq *wq;
2189
2190 rcu_read_lock();
2191 wq = rcu_dereference(sk->sk_wq);
2192 if (wq_has_sleeper(wq))
2193 wake_up_interruptible_all(&wq->wait);
2194 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195}
2196
2197static void sock_def_error_report(struct sock *sk)
2198{
Eric Dumazet43815482010-04-29 11:01:49 +00002199 struct socket_wq *wq;
2200
2201 rcu_read_lock();
2202 wq = rcu_dereference(sk->sk_wq);
2203 if (wq_has_sleeper(wq))
2204 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002205 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002206 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
2208
David S. Miller676d2362014-04-11 16:15:36 -04002209static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210{
Eric Dumazet43815482010-04-29 11:01:49 +00002211 struct socket_wq *wq;
2212
2213 rcu_read_lock();
2214 wq = rcu_dereference(sk->sk_wq);
2215 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002216 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002217 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002218 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002219 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220}
2221
2222static void sock_def_write_space(struct sock *sk)
2223{
Eric Dumazet43815482010-04-29 11:01:49 +00002224 struct socket_wq *wq;
2225
2226 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
2228 /* Do not wake up a writer until he can make "significant"
2229 * progress. --DaveM
2230 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002231 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002232 wq = rcu_dereference(sk->sk_wq);
2233 if (wq_has_sleeper(wq))
2234 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002235 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
2237 /* Should agree with poll, otherwise some programs break */
2238 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002239 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 }
2241
Eric Dumazet43815482010-04-29 11:01:49 +00002242 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243}
2244
2245static void sock_def_destruct(struct sock *sk)
2246{
Jesper Juhla51482b2005-11-08 09:41:34 -08002247 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248}
2249
2250void sk_send_sigurg(struct sock *sk)
2251{
2252 if (sk->sk_socket && sk->sk_socket->file)
2253 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002254 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255}
Eric Dumazet2a915252009-05-27 11:30:05 +00002256EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
2258void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2259 unsigned long expires)
2260{
2261 if (!mod_timer(timer, expires))
2262 sock_hold(sk);
2263}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264EXPORT_SYMBOL(sk_reset_timer);
2265
2266void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2267{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002268 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 __sock_put(sk);
2270}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271EXPORT_SYMBOL(sk_stop_timer);
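
/*
 * Usage sketch (hypothetical): sk_reset_timer() takes a reference on the
 * socket unless the timer was already pending; the timer handler (or
 * sk_stop_timer()) is responsible for dropping it.
 */
static void example_arm_timer(struct sock *sk, unsigned long delay)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
}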
2272
2273void sock_init_data(struct socket *sock, struct sock *sk)
2274{
2275 skb_queue_head_init(&sk->sk_receive_queue);
2276 skb_queue_head_init(&sk->sk_write_queue);
2277 skb_queue_head_init(&sk->sk_error_queue);
2278
2279 sk->sk_send_head = NULL;
2280
2281 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002282
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 sk->sk_allocation = GFP_KERNEL;
2284 sk->sk_rcvbuf = sysctl_rmem_default;
2285 sk->sk_sndbuf = sysctl_wmem_default;
2286 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002287 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
2289 sock_set_flag(sk, SOCK_ZAPPED);
2290
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002291 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002293 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 sock->sk = sk;
2295 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002296 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
Eric Dumazetb6c67122010-04-08 23:03:29 +00002298 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef0e2007-07-19 01:49:00 -07002300 lockdep_set_class_and_name(&sk->sk_callback_lock,
2301 af_callback_keys + sk->sk_family,
2302 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
2304 sk->sk_state_change = sock_def_wakeup;
2305 sk->sk_data_ready = sock_def_readable;
2306 sk->sk_write_space = sock_def_write_space;
2307 sk->sk_error_report = sock_def_error_report;
2308 sk->sk_destruct = sock_def_destruct;
2309
Eric Dumazet5640f762012-09-23 23:04:42 +00002310 sk->sk_frag.page = NULL;
2311 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002312 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002314 sk->sk_peer_pid = NULL;
2315 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 sk->sk_write_pending = 0;
2317 sk->sk_rcvlowat = 1;
2318 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2319 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2320
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002321 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322
Cong Wange0d10952013-08-01 11:10:25 +08002323#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002324 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002325 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002326#endif
2327
Eric Dumazet62748f32013-09-24 08:20:52 -07002328 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002329 sk->sk_pacing_rate = ~0U;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002330 /*
2331 * Before updating sk_refcnt, we must commit prior changes to memory
2332 * (Documentation/RCU/rculist_nulls.txt for details)
2333 */
2334 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002336 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337}
Eric Dumazet2a915252009-05-27 11:30:05 +00002338EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002340void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341{
2342 might_sleep();
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002343 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002344 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002346 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002347 spin_unlock(&sk->sk_lock.slock);
2348 /*
2349 * The sk_lock has mutex_lock() semantics here:
2350 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002351 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002352 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002354EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002356void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357{
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002358 /*
2359 * The sk_lock has mutex_unlock() semantics:
2360 */
2361 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2362
2363 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 if (sk->sk_backlog.tail)
2365 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002366
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002367 /* Warning : release_cb() might need to release sk ownership,
2368 * ie call sock_release_ownership(sk) before us.
2369 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002370 if (sk->sk_prot->release_cb)
2371 sk->sk_prot->release_cb(sk);
2372
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002373 sock_release_ownership(sk);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07002374 if (waitqueue_active(&sk->sk_lock.wq))
2375 wake_up(&sk->sk_lock.wq);
2376 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377}
2378EXPORT_SYMBOL(release_sock);
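
/*
 * Usage sketch (hypothetical): the standard process-context pattern; any
 * backlog accumulated while the lock was owned is replayed on release.
 */
static void example_locked_update(struct sock *sk)
{
	lock_sock(sk);
	/* ... modify socket state safely ... */
	release_sock(sk);
}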
2379
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002380/**
2381 * lock_sock_fast - fast version of lock_sock
2382 * @sk: socket
2383 *
 2384 * This version should be used for very small sections, where the process
 2385 * won't block. Returns false if the fast path is taken:
 2386 *   sk_lock.slock locked, owned = 0, BH disabled
 2387 * Returns true if the slow path is taken:
 2388 *   sk_lock.slock unlocked, owned = 1, BH enabled
2389 */
2390bool lock_sock_fast(struct sock *sk)
2391{
2392 might_sleep();
2393 spin_lock_bh(&sk->sk_lock.slock);
2394
2395 if (!sk->sk_lock.owned)
2396 /*
2397 * Note : We must disable BH
2398 */
2399 return false;
2400
2401 __lock_sock(sk);
2402 sk->sk_lock.owned = 1;
2403 spin_unlock(&sk->sk_lock.slock);
2404 /*
2405 * The sk_lock has mutex_lock() semantics here:
2406 */
2407 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2408 local_bh_enable();
2409 return true;
2410}
2411EXPORT_SYMBOL(lock_sock_fast);
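
/*
 * Usage sketch (hypothetical): the canonical pairing with
 * unlock_sock_fast(), which undoes whichever path was taken.
 */
static void example_fast_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... short, non-sleeping work on sk ... */
	unlock_sock_fast(sk, slow);
}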
2412
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002414{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002415 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002417 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002418 tv = ktime_to_timeval(sk->sk_stamp);
2419 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002421 if (tv.tv_sec == 0) {
2422 sk->sk_stamp = ktime_get_real();
2423 tv = ktime_to_timeval(sk->sk_stamp);
2424 }
2425 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002426}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427EXPORT_SYMBOL(sock_get_timestamp);
2428
Eric Dumazetae40eb12007-03-18 17:33:16 -07002429int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2430{
2431 struct timespec ts;
2432 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002433 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002434 ts = ktime_to_timespec(sk->sk_stamp);
2435 if (ts.tv_sec == -1)
2436 return -ENOENT;
2437 if (ts.tv_sec == 0) {
2438 sk->sk_stamp = ktime_get_real();
2439 ts = ktime_to_timespec(sk->sk_stamp);
2440 }
2441 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2442}
2443EXPORT_SYMBOL(sock_get_timestampns);
2444
Patrick Ohly20d49472009-02-12 05:03:38 +00002445void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002446{
Patrick Ohly20d49472009-02-12 05:03:38 +00002447 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002448 unsigned long previous_flags = sk->sk_flags;
2449
Patrick Ohly20d49472009-02-12 05:03:38 +00002450 sock_set_flag(sk, flag);
2451 /*
2452 * we just set one of the two flags which require net
2453 * time stamping, but time stamping might have been on
2454 * already because of the other one
2455 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002456 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002457 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 }
2459}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460
Richard Cochrancb820f82013-07-19 19:40:09 +02002461int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2462 int level, int type)
2463{
2464 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002465 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002466 int copied, err;
2467
2468 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002469 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002470 if (skb == NULL)
2471 goto out;
2472
2473 copied = skb->len;
2474 if (copied > len) {
2475 msg->msg_flags |= MSG_TRUNC;
2476 copied = len;
2477 }
David S. Miller51f3d022014-11-05 16:46:40 -05002478 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002479 if (err)
2480 goto out_free_skb;
2481
2482 sock_recv_timestamp(msg, sk, skb);
2483
2484 serr = SKB_EXT_ERR(skb);
2485 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2486
2487 msg->msg_flags |= MSG_ERRQUEUE;
2488 err = copied;
2489
Richard Cochrancb820f82013-07-19 19:40:09 +02002490out_free_skb:
2491 kfree_skb(skb);
2492out:
2493 return err;
2494}
2495EXPORT_SYMBOL(sock_recv_errqueue);
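
/*
 * Usage sketch (hypothetical): a protocol recvmsg() diverting
 * MSG_ERRQUEUE requests, in the way the ping socket code uses
 * SOL_IP/IP_RECVERR.
 */
static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags)
{
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
	/* ... normal receive path ... */
	return -EOPNOTSUPP;
}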
2496
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497/*
 2498 * Get a socket option on a socket.
2499 *
2500 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2501 * asynchronous errors should be reported by getsockopt. We assume
 2502 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2503 */
2504int sock_common_getsockopt(struct socket *sock, int level, int optname,
2505 char __user *optval, int __user *optlen)
2506{
2507 struct sock *sk = sock->sk;
2508
2509 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2510}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511EXPORT_SYMBOL(sock_common_getsockopt);
2512
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002513#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002514int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2515 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002516{
2517 struct sock *sk = sock->sk;
2518
Johannes Berg1e51f952007-03-06 13:44:06 -08002519 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002520 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2521 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002522 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2523}
2524EXPORT_SYMBOL(compat_sock_common_getsockopt);
2525#endif
2526
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2528 struct msghdr *msg, size_t size, int flags)
2529{
2530 struct sock *sk = sock->sk;
2531 int addr_len = 0;
2532 int err;
2533
2534 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2535 flags & ~MSG_DONTWAIT, &addr_len);
2536 if (err >= 0)
2537 msg->msg_namelen = addr_len;
2538 return err;
2539}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

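/**
 * sk_common_release - generic protocol release path
 * @sk: socket being torn down
 *
 * Runs the protocol's destroy() hook, unhashes and orphans the socket,
 * frees its xfrm policies and cached page fragment, then drops the
 * caller's reference with sock_put().
 */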
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because another
	 * CPU runs the receiver and did the hash table lookup before we
	 * unhashed the socket. They will reach the receive queue and be
	 * purged by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

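/*
 * Per-protocol socket accounting: each registered protocol gets a slot
 * (prot->inuse_idx) in per-cpu counter arrays. Sums over all CPUs may
 * transiently be negative, so readers clamp the result to zero. This
 * feeds the "sockets" column of /proc/net/protocols.
 */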
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
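/*
 * Without network namespaces a single static set of per-cpu counters
 * is enough; the struct net argument is accepted but unused.
 */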
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

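/**
 * proto_register - add a protocol to the global protocol list
 * @prot: protocol descriptor to register
 * @alloc_slab: if non-zero, create slab caches for the protocol's sock
 *	objects and, when present, its request-sock and timewait-sock
 *	objects
 *
 * Returns 0 on success or -ENOBUFS if a slab cache (or its name) could
 * not be allocated.
 */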
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
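/*
 * Usage sketch (hedged; the "foo" names are hypothetical, not a protocol
 * in this tree): a protocol module pairs proto_register() at init time
 * with proto_unregister() on exit:
 *
 *	static struct proto foo_prot = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	err = proto_register(&foo_prot, 1);	(1 => allocate a slab)
 *	...
 *	proto_unregister(&foo_prot);
 */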

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

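/*
 * seq_file machinery behind /proc/net/protocols: walk proto_list under
 * proto_list_mutex and print one row per registered protocol.
 */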
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */