/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
};
static const char *af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_MAX"
};
static const char *af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-21"       , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-29"          ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_MAX"
};
#endif

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
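
/*
 * Worked example (illustrative only, sizes vary by architecture and
 * kernel configuration): on a hypothetical platform where
 * sizeof(struct sk_buff) is 256 bytes, _SK_MEM_OVERHEAD is
 * 256 + 256 = 512 bytes, so SK_WMEM_MAX and SK_RMEM_MAX both default
 * to 512 * 256 = 131072 bytes (128 KiB) of true buffer space per socket.
 */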

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		err = -ENOBUFS;
		goto out;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

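/*
 * Common receive path helper: run the socket filter, then either hand the
 * skb to sk_backlog_rcv() right away (when no process currently owns the
 * socket) or queue it on the socket backlog, to be processed when the
 * owner calls release_sock().
 */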
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk->sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sk->sk_net;
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	if (devname[0] == '\0') {
		index = 0;
	} else {
		struct net_device *dev = dev_get_by_name(net, devname);

		ret = -ENODEV;
		if (!dev)
			goto out;

		index = dev->ifindex;
		dev_put(dev);
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

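/*
 * Illustrative only (userspace, not part of sock_bindtodevice() above):
 * binding a socket to an interface requires CAP_NET_RAW and looks
 * roughly like
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * Passing an empty name with length zero removes the binding again.
 */
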
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN)) {
			ret = -EACCES;
		} else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't return an error on this; BSD doesn't, and if you
		   think about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error on this; BSD doesn't, and if you
		   think about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;
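
	/*
	 * Worked example of the doubling above (illustrative only, assuming
	 * net.core.rmem_max allows the request):
	 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &(int){65536}, sizeof(int))
	 * stores 131072 in sk->sk_rcvbuf, and a subsequent
	 * getsockopt(SO_RCVBUF) reports 131072 back to the application.
	 */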

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			sk->sk_mark = val;
		}
		break;

	/* We implement SO_SNDLOWAT etc. as not settable
	   (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}


int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif

	memcpy(nsk, osk, osk->sk_prot->obj_size);
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sk->sk_net = get_net(net);
	}

	return sk;
}

void sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	put_net(sk->sk_net);
	sk_prot_free(sk->sk_prot_creator, sk);
}

/*
 * The last sock_put should drop the reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	sk->sk_net = get_net(&init_net);
	sock_put(sk);
}

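/*
 * sk_clone() duplicates an existing socket; it is used, for example, when a
 * listening TCP socket spawns a child socket for a newly accepted
 * connection.  The clone starts out locked, with its own (empty) queues and
 * a reference count of 2 (one for the hash chain, one for the caller).
 */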
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(newsk->sk_net);
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so
			 * invalidate the destructor and do a plain
			 * sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

EXPORT_SYMBOL_GPL(sk_clone);

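/*
 * sk_setup_caps() is typically called by a protocol once a route has been
 * attached to the socket (e.g. at connect time); it caches the destination
 * entry and derives the per-socket offload capabilities (scatter/gather,
 * checksumming, GSO) from the output device's feature flags.
 */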
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		else
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	skb_truesize_check(skb);
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_uncharge(skb->sk, skb->truesize);
}


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

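/*
 * Note on the pair above: callers are expected to pass the same size to
 * sock_kfree_s() that they passed to sock_kmalloc(), so that sk_omem_alloc
 * is debited by exactly what was credited; the accounting is not stored
 * with the allocation itself.
 */
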
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

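/*
 * __lock_sock() is the slow path of lock_sock(): it sleeps
 * (uninterruptibly) on sk_lock.wq until the current owner releases the
 * socket, re-taking the slock spinlock before returning to the caller.
 */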
static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

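/*
 * __release_sock() drains the backlog that accumulated while a process
 * owned the socket: packets queued by bottom halves via sk_add_backlog()
 * are now run through sk_backlog_rcv() in process context, with the bh
 * lock dropped around each batch.
 */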
static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure();

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (prot->memory_pressure) {
		if (!*prot->memory_pressure ||
		    prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}

EXPORT_SYMBOL(__sk_mem_schedule);
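
/*
 * Worked example for __sk_mem_schedule() above (illustrative only,
 * assuming SK_MEM_QUANTUM == PAGE_SIZE and 4 KiB pages): scheduling
 * size = 6000 bytes gives amt = sk_mem_pages(6000) = 2 quanta, so
 * sk_forward_alloc grows by 8192 bytes while the protocol's
 * memory_allocated counter grows by 2.  The sysctl_mem[0..2] thresholds
 * (e.g. tcp_mem) are compared against that quantum-granular counter.
 */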

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}

EXPORT_SYMBOL(__sk_mem_reclaim);
1518
1519
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520/*
1521 * Set of default routines for initialising struct proto_ops when
1522 * the protocol does not support a particular function. In certain
1523 * cases where it makes no sense for a protocol to have a "do nothing"
1524 * function, some default processing is provided.
1525 */
1526
1527int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1528{
1529 return -EOPNOTSUPP;
1530}
1531
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001532int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 int len, int flags)
1534{
1535 return -EOPNOTSUPP;
1536}
1537
1538int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1539{
1540 return -EOPNOTSUPP;
1541}
1542
1543int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1544{
1545 return -EOPNOTSUPP;
1546}
1547
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001548int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 int *len, int peer)
1550{
1551 return -EOPNOTSUPP;
1552}
1553
1554unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
1555{
1556 return 0;
1557}
1558
1559int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1560{
1561 return -EOPNOTSUPP;
1562}
1563
1564int sock_no_listen(struct socket *sock, int backlog)
1565{
1566 return -EOPNOTSUPP;
1567}
1568
1569int sock_no_shutdown(struct socket *sock, int how)
1570{
1571 return -EOPNOTSUPP;
1572}
1573
1574int sock_no_setsockopt(struct socket *sock, int level, int optname,
1575 char __user *optval, int optlen)
1576{
1577 return -EOPNOTSUPP;
1578}
1579
1580int sock_no_getsockopt(struct socket *sock, int level, int optname,
1581 char __user *optval, int __user *optlen)
1582{
1583 return -EOPNOTSUPP;
1584}
1585
1586int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1587 size_t len)
1588{
1589 return -EOPNOTSUPP;
1590}
1591
1592int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1593 size_t len, int flags)
1594{
1595 return -EOPNOTSUPP;
1596}
1597
1598int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1599{
1600 /* Mirror missing mmap method error code */
1601 return -ENODEV;
1602}
1603
1604ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1605{
1606 ssize_t res;
1607 struct msghdr msg = {.msg_flags = flags};
1608 struct kvec iov;
1609 char *kaddr = kmap(page);
1610 iov.iov_base = kaddr + offset;
1611 iov.iov_len = size;
1612 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1613 kunmap(page);
1614 return res;
1615}
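/*
 * Illustrative sketch, not part of the original file: a protocol that
 * does not support some operations can point its proto_ops at the
 * stubs above instead of writing its own.  "example_dgram_ops" is
 * hypothetical and deliberately partial; PF_UNSPEC stands in for a
 * real protocol family:
 */
static const struct proto_ops example_dgram_ops = {
	.family		= PF_UNSPEC,	/* placeholder family */
	.owner		= THIS_MODULE,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.socketpair	= sock_no_socketpair,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
	/* the protocol's real bind/sendmsg/recvmsg/... methods go here */
};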
1616
1617/*
1618 * Default Socket Callbacks
1619 */
1620
1621static void sock_def_wakeup(struct sock *sk)
1622{
1623 read_lock(&sk->sk_callback_lock);
1624 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1625 wake_up_interruptible_all(sk->sk_sleep);
1626 read_unlock(&sk->sk_callback_lock);
1627}
1628
1629static void sock_def_error_report(struct sock *sk)
1630{
1631 read_lock(&sk->sk_callback_lock);
1632 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1633 wake_up_interruptible(sk->sk_sleep);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001634 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 read_unlock(&sk->sk_callback_lock);
1636}
1637
1638static void sock_def_readable(struct sock *sk, int len)
1639{
1640 read_lock(&sk->sk_callback_lock);
1641 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1642 wake_up_interruptible(sk->sk_sleep);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001643 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 read_unlock(&sk->sk_callback_lock);
1645}
1646
1647static void sock_def_write_space(struct sock *sk)
1648{
1649 read_lock(&sk->sk_callback_lock);
1650
1651 /* Do not wake up a writer until he can make "significant"
1652 * progress. --DaveM
1653 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001654 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1656 wake_up_interruptible(sk->sk_sleep);
1657
1658 /* Should agree with poll, otherwise some programs break */
1659 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001660 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 }
1662
1663 read_unlock(&sk->sk_callback_lock);
1664}
1665
1666static void sock_def_destruct(struct sock *sk)
1667{
Jesper Juhla51482b2005-11-08 09:41:34 -08001668 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669}
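/*
 * Illustrative sketch, not part of the original file: in-kernel socket
 * users (sunrpc is one example) replace the default callbacks above by
 * writing the pointers under sk_callback_lock.  "example_data_ready"
 * names a hypothetical replacement with the sk_data_ready signature:
 */
static inline void example_install_callbacks(struct sock *sk,
			void (*example_data_ready)(struct sock *, int))
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;	/* private state, if any, goes here */
	sk->sk_data_ready = example_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}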
1670
1671void sk_send_sigurg(struct sock *sk)
1672{
1673 if (sk->sk_socket && sk->sk_socket->file)
1674 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001675 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676}
1677
1678void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1679 unsigned long expires)
1680{
1681 if (!mod_timer(timer, expires))
1682 sock_hold(sk);
1683}
1684
1685EXPORT_SYMBOL(sk_reset_timer);
1686
1687void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1688{
1689 if (timer_pending(timer) && del_timer(timer))
1690 __sock_put(sk);
1691}
1692
1693EXPORT_SYMBOL(sk_stop_timer);
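/*
 * Illustrative sketch, not part of the original file: sk_reset_timer()
 * takes a socket reference when it arms a previously idle timer, so
 * the expiry handler must drop it.  A hypothetical protocol timer
 * ("example_timer_expire" is made up) would follow this pattern:
 */
static inline void example_timer_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... timeout work; possibly re-arm with
	 * sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ); ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
}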
1694
1695void sock_init_data(struct socket *sock, struct sock *sk)
1696{
1697 skb_queue_head_init(&sk->sk_receive_queue);
1698 skb_queue_head_init(&sk->sk_write_queue);
1699 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001700#ifdef CONFIG_NET_DMA
1701 skb_queue_head_init(&sk->sk_async_wait_queue);
1702#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
1704 sk->sk_send_head = NULL;
1705
1706 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001707
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 sk->sk_allocation = GFP_KERNEL;
1709 sk->sk_rcvbuf = sysctl_rmem_default;
1710 sk->sk_sndbuf = sysctl_wmem_default;
1711 sk->sk_state = TCP_CLOSE;
1712 sk->sk_socket = sock;
1713
1714 sock_set_flag(sk, SOCK_ZAPPED);
1715
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001716 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 sk->sk_type = sock->type;
1718 sk->sk_sleep = &sock->wait;
1719 sock->sk = sk;
1720 } else
1721 sk->sk_sleep = NULL;
1722
1723 rwlock_init(&sk->sk_dst_lock);
1724 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef0e2007-07-19 01:49:00 -07001725 lockdep_set_class_and_name(&sk->sk_callback_lock,
1726 af_callback_keys + sk->sk_family,
1727 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
1729 sk->sk_state_change = sock_def_wakeup;
1730 sk->sk_data_ready = sock_def_readable;
1731 sk->sk_write_space = sock_def_write_space;
1732 sk->sk_error_report = sock_def_error_report;
1733 sk->sk_destruct = sock_def_destruct;
1734
1735 sk->sk_sndmsg_page = NULL;
1736 sk->sk_sndmsg_off = 0;
1737
1738 sk->sk_peercred.pid = 0;
1739 sk->sk_peercred.uid = -1;
1740 sk->sk_peercred.gid = -1;
1741 sk->sk_write_pending = 0;
1742 sk->sk_rcvlowat = 1;
1743 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1744 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1745
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001746 sk->sk_stamp = ktime_set(-1L, -1L);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
1748 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08001749 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750}
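/*
 * Illustrative sketch, not part of the original file: an address
 * family's create() hook typically calls sock_init_data() right after
 * sk_alloc() and then fills in its own state.  "example_create" is
 * hypothetical; PF_UNSPEC stands in for a real family constant:
 */
static inline int example_create(struct net *net, struct socket *sock,
				 struct proto *example_proto)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_UNSPEC /* placeholder */, GFP_KERNEL,
		      example_proto);
	if (sk == NULL)
		return -ENOBUFS;
	sock_init_data(sock, sk);	/* queues, timers, default callbacks */
	/* protocol-private initialisation follows here */
	return 0;
}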
1751
Harvey Harrisonb5606c22008-02-13 15:03:16 -08001752void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753{
1754 might_sleep();
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07001755 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02001756 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02001758 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07001759 spin_unlock(&sk->sk_lock.slock);
1760 /*
1761 * The sk_lock has mutex_lock() semantics here:
1762 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08001763 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07001764 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765}
1766
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08001767EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Harvey Harrisonb5606c22008-02-13 15:03:16 -08001769void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770{
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07001771 /*
1772 * The sk_lock has mutex_unlock() semantics:
1773 */
1774 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1775
1776 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 if (sk->sk_backlog.tail)
1778 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02001779 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb9a2006-07-03 00:25:35 -07001780 if (waitqueue_active(&sk->sk_lock.wq))
1781 wake_up(&sk->sk_lock.wq);
1782 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783}
1784EXPORT_SYMBOL(release_sock);
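/*
 * Illustrative sketch, not part of the original file: process-context
 * socket code brackets its work with lock_sock()/release_sock(), where
 * lock_sock() is just lock_sock_nested(sk, 0).  Packets that arrive
 * while the lock is owned sit on the backlog and are processed by
 * __release_sock() on the way out.  Hypothetical protected update:
 */
static inline void example_locked_update(struct sock *sk, int val)
{
	lock_sock(sk);
	sk->sk_rcvlowat = val ? : 1;	/* sample field updated under lock */
	release_sock(sk);
}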
1785
1786int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001787{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001788 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 if (!sock_flag(sk, SOCK_TIMESTAMP))
1790 sock_enable_timestamp(sk);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001791 tv = ktime_to_timeval(sk->sk_stamp);
1792 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001794 if (tv.tv_sec == 0) {
1795 sk->sk_stamp = ktime_get_real();
1796 tv = ktime_to_timeval(sk->sk_stamp);
1797 }
1798 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001799}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800EXPORT_SYMBOL(sock_get_timestamp);
1801
Eric Dumazetae40eb12007-03-18 17:33:16 -07001802int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
1803{
1804 struct timespec ts;
1805 if (!sock_flag(sk, SOCK_TIMESTAMP))
1806 sock_enable_timestamp(sk);
1807 ts = ktime_to_timespec(sk->sk_stamp);
1808 if (ts.tv_sec == -1)
1809 return -ENOENT;
1810 if (ts.tv_sec == 0) {
1811 sk->sk_stamp = ktime_get_real();
1812 ts = ktime_to_timespec(sk->sk_stamp);
1813 }
1814 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
1815}
1816EXPORT_SYMBOL(sock_get_timestampns);
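/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above back the SIOCGSTAMP and SIOCGSTAMPNS ioctls.  A protocol ioctl
 * handler typically just forwards to them, much as af_inet does
 * ("example_stamp_ioctl" is made up):
 */
static inline int example_stamp_ioctl(struct sock *sk, unsigned int cmd,
				      unsigned long arg)
{
	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;	/* let the caller try elsewhere */
	}
}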
1817
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818void sock_enable_timestamp(struct sock *sk)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001819{
1820 if (!sock_flag(sk, SOCK_TIMESTAMP)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 sock_set_flag(sk, SOCK_TIMESTAMP);
1822 net_enable_timestamp();
1823 }
1824}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
1826/*
1827 * Get a socket option on a socket.
1828 *
1829 * FIX: POSIX 1003.1g is very ambiguous here. It states that
1830 * asynchronous errors should be reported by getsockopt. We assume
1831 * this means if you specify SO_ERROR (otherwise what's the point of it?).
1832 */
1833int sock_common_getsockopt(struct socket *sock, int level, int optname,
1834 char __user *optval, int __user *optlen)
1835{
1836 struct sock *sk = sock->sk;
1837
1838 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1839}
1840
1841EXPORT_SYMBOL(sock_common_getsockopt);
1842
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001843#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001844int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
1845 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001846{
1847 struct sock *sk = sock->sk;
1848
Johannes Berg1e51f952007-03-06 13:44:06 -08001849 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001850 return sk->sk_prot->compat_getsockopt(sk, level, optname,
1851 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001852 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1853}
1854EXPORT_SYMBOL(compat_sock_common_getsockopt);
1855#endif
1856
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1858 struct msghdr *msg, size_t size, int flags)
1859{
1860 struct sock *sk = sock->sk;
1861 int addr_len = 0;
1862 int err;
1863
1864 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
1865 flags & ~MSG_DONTWAIT, &addr_len);
1866 if (err >= 0)
1867 msg->msg_namelen = addr_len;
1868 return err;
1869}
1870
1871EXPORT_SYMBOL(sock_common_recvmsg);
1872
1873/*
1874 * Set socket options on a socket. The call is passed straight through
1875 * to the protocol-level handler.
1875 */
1876int sock_common_setsockopt(struct socket *sock, int level, int optname,
1877 char __user *optval, int optlen)
1878{
1879 struct sock *sk = sock->sk;
1880
1881 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1882}
1883
1884EXPORT_SYMBOL(sock_common_setsockopt);
1885
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001886#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001887int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
1888 char __user *optval, int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001889{
1890 struct sock *sk = sock->sk;
1891
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001892 if (sk->sk_prot->compat_setsockopt != NULL)
1893 return sk->sk_prot->compat_setsockopt(sk, level, optname,
1894 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001895 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1896}
1897EXPORT_SYMBOL(compat_sock_common_setsockopt);
1898#endif
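/*
 * Illustrative sketch, not part of the original file: a family whose
 * struct proto implements [gs]etsockopt and recvmsg can plug the
 * common pass-throughs straight into its proto_ops, as inet_stream_ops
 * does.  "example_common_ops" is hypothetical and deliberately partial:
 */
static const struct proto_ops example_common_ops = {
	.family			= PF_UNSPEC,	/* placeholder family */
	.owner			= THIS_MODULE,
	.setsockopt		= sock_common_setsockopt,
	.getsockopt		= sock_common_getsockopt,
	.recvmsg		= sock_common_recvmsg,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_sock_common_setsockopt,
	.compat_getsockopt	= compat_sock_common_getsockopt,
#endif
};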
1899
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900void sk_common_release(struct sock *sk)
1901{
1902 if (sk->sk_prot->destroy)
1903 sk->sk_prot->destroy(sk);
1904
1905 /*
1906	 * Observation: when sk_common_release() is called, user processes
1907	 * have no access to the socket any more, but the net stack still does.
1908 * Step one, detach it from networking:
1909 *
1910 * A. Remove from hash tables.
1911 */
1912
1913 sk->sk_prot->unhash(sk);
1914
1915 /*
1916	 * At this point the socket cannot receive new packets, but some
1917	 * packets may still be in flight: another CPU may have looked the
1918	 * socket up in the hash tables before we unhashed it. Those packets
1919	 * will reach the receive queue and be purged by the socket destructor.
1920	 *
1921	 * Also, packets may still be pending on the receive queue, and our
1922	 * own transmitted packets may be waiting in device queues. The
1923	 * destructor drains the receive queue, but transmitted packets delay
1924	 * socket destruction until the last reference is released.
1925 */
1926
1927 sock_orphan(sk);
1928
1929 xfrm_sk_free_policy(sk);
1930
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001931 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 sock_put(sk);
1933}
1934
1935EXPORT_SYMBOL(sk_common_release);
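/*
 * Illustrative sketch, not part of the original file: a protocol with
 * no teardown beyond the generic steps can use sk_common_release() as
 * the body of its struct proto close routine, as e.g. raw sockets do
 * ("example_close" is made up):
 */
static inline void example_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);	/* destroy, unhash, orphan, final put */
}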
1936
1937static DEFINE_RWLOCK(proto_list_lock);
1938static LIST_HEAD(proto_list);
1939
1940int proto_register(struct proto *prot, int alloc_slab)
1941{
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001942 char *request_sock_slab_name = NULL;
1943 char *timewait_sock_slab_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Eric Dumazet65f76512008-01-03 20:46:48 -08001945 if (sock_prot_inuse_init(prot) != 0) {
Arnaldo Carvalho de Meloebb53d72007-11-21 22:08:50 +08001946 printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08001947 goto out;
Arnaldo Carvalho de Meloebb53d72007-11-21 22:08:50 +08001948 }
Pavel Emelyanovb733c002007-11-07 02:23:38 -08001949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 if (alloc_slab) {
1951 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09001952 SLAB_HWCACHE_ALIGN, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954 if (prot->slab == NULL) {
1955 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
1956 prot->name);
Eric Dumazet286ab3d2007-11-05 23:38:39 -08001957 goto out_free_inuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001959
1960 if (prot->rsk_prot != NULL) {
1961 static const char mask[] = "request_sock_%s";
1962
1963 request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1964 if (request_sock_slab_name == NULL)
1965 goto out_free_sock_slab;
1966
1967 sprintf(request_sock_slab_name, mask, prot->name);
1968 prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
1969 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09001970 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001971
1972 if (prot->rsk_prot->slab == NULL) {
1973 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
1974 prot->name);
1975 goto out_free_request_sock_slab_name;
1976 }
1977 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001978
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001979 if (prot->twsk_prot != NULL) {
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001980 static const char mask[] = "tw_sock_%s";
1981
1982 timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1983
1984 if (timewait_sock_slab_name == NULL)
1985 goto out_free_request_sock_slab;
1986
1987 sprintf(timewait_sock_slab_name, mask, prot->name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001988 prot->twsk_prot->twsk_slab =
1989 kmem_cache_create(timewait_sock_slab_name,
1990 prot->twsk_prot->twsk_obj_size,
1991 0, SLAB_HWCACHE_ALIGN,
Paul Mundt20c2df82007-07-20 10:11:58 +09001992 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001993 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001994 goto out_free_timewait_sock_slab_name;
1995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 }
1997
Arnaldo Carvalho de Melo2a278052005-04-16 15:24:09 -07001998 write_lock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 list_add(&prot->node, &proto_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 write_unlock(&proto_list_lock);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002001 return 0;
2002
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002003out_free_timewait_sock_slab_name:
2004 kfree(timewait_sock_slab_name);
2005out_free_request_sock_slab:
2006 if (prot->rsk_prot && prot->rsk_prot->slab) {
2007 kmem_cache_destroy(prot->rsk_prot->slab);
2008 prot->rsk_prot->slab = NULL;
2009 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002010out_free_request_sock_slab_name:
2011 kfree(request_sock_slab_name);
2012out_free_sock_slab:
2013 kmem_cache_destroy(prot->slab);
2014 prot->slab = NULL;
Eric Dumazet286ab3d2007-11-05 23:38:39 -08002015out_free_inuse:
Eric Dumazet65f76512008-01-03 20:46:48 -08002016 sock_prot_inuse_free(prot);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002017out:
2018 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019}
2020
2021EXPORT_SYMBOL(proto_register);
2022
2023void proto_unregister(struct proto *prot)
2024{
2025 write_lock(&proto_list_lock);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002026 list_del(&prot->node);
2027 write_unlock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
Eric Dumazet65f76512008-01-03 20:46:48 -08002029 sock_prot_inuse_free(prot);
Arnaldo Carvalho de Meloebb53d72007-11-21 22:08:50 +08002030
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 if (prot->slab != NULL) {
2032 kmem_cache_destroy(prot->slab);
2033 prot->slab = NULL;
2034 }
2035
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002036 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2037 const char *name = kmem_cache_name(prot->rsk_prot->slab);
2038
2039 kmem_cache_destroy(prot->rsk_prot->slab);
2040 kfree(name);
2041 prot->rsk_prot->slab = NULL;
2042 }
2043
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002044 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2045 const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002046
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002047 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002048 kfree(name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002049 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002050 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051}
2052
2053EXPORT_SYMBOL(proto_unregister);
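/*
 * Illustrative sketch, not part of the original file: a protocol
 * module pairs proto_register()/proto_unregister() in its init/exit
 * paths.  "struct example_sock" and "example_proto" are hypothetical;
 * alloc_slab=1 requests a kmem cache of obj_size-sized sockets:
 */
struct example_sock {
	struct sock	sk;	/* struct sock must be the first member */
	/* protocol-private fields follow */
};

static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static inline int example_proto_init(void)
{
	return proto_register(&example_proto, 1);
}

static inline void example_proto_exit(void)
{
	proto_unregister(&example_proto);
}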
2054
2055#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002057 __acquires(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058{
2059 read_lock(&proto_list_lock);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002060 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061}
2062
2063static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2064{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002065 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066}
2067
2068static void proto_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002069 __releases(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070{
2071 read_unlock(&proto_list_lock);
2072}
2073
2074static char proto_method_implemented(const void *method)
2075{
2076 return method == NULL ? 'n' : 'y';
2077}
2078
2079static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2080{
2081 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
2082 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2083 proto->name,
2084 proto->obj_size,
2085 proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
2086 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2087 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2088 proto->max_header,
2089 proto->slab == NULL ? "no" : "yes",
2090 module_name(proto->owner),
2091 proto_method_implemented(proto->close),
2092 proto_method_implemented(proto->connect),
2093 proto_method_implemented(proto->disconnect),
2094 proto_method_implemented(proto->accept),
2095 proto_method_implemented(proto->ioctl),
2096 proto_method_implemented(proto->init),
2097 proto_method_implemented(proto->destroy),
2098 proto_method_implemented(proto->shutdown),
2099 proto_method_implemented(proto->setsockopt),
2100 proto_method_implemented(proto->getsockopt),
2101 proto_method_implemented(proto->sendmsg),
2102 proto_method_implemented(proto->recvmsg),
2103 proto_method_implemented(proto->sendpage),
2104 proto_method_implemented(proto->bind),
2105 proto_method_implemented(proto->backlog_rcv),
2106 proto_method_implemented(proto->hash),
2107 proto_method_implemented(proto->unhash),
2108 proto_method_implemented(proto->get_port),
2109 proto_method_implemented(proto->enter_memory_pressure));
2110}
2111
2112static int proto_seq_show(struct seq_file *seq, void *v)
2113{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002114 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2116 "protocol",
2117 "size",
2118 "sockets",
2119 "memory",
2120 "press",
2121 "maxhdr",
2122 "slab",
2123 "module",
2124 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2125 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002126 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 return 0;
2128}
2129
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002130static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 .start = proto_seq_start,
2132 .next = proto_seq_next,
2133 .stop = proto_seq_stop,
2134 .show = proto_seq_show,
2135};
2136
2137static int proto_seq_open(struct inode *inode, struct file *file)
2138{
2139 return seq_open(file, &proto_seq_ops);
2140}
2141
Arjan van de Ven9a321442007-02-12 00:55:35 -08002142static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 .owner = THIS_MODULE,
2144 .open = proto_seq_open,
2145 .read = seq_read,
2146 .llseek = seq_lseek,
2147 .release = seq_release,
2148};
2149
2150static int __init proto_init(void)
2151{
2152 /* register /proc/net/protocols */
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002153 return proc_net_fops_create(&init_net, "protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154}
2155
2156subsys_initcall(proto_init);
2157
2158#endif /* CONFIG_PROC_FS */
2159
2160EXPORT_SYMBOL(sk_alloc);
2161EXPORT_SYMBOL(sk_free);
2162EXPORT_SYMBOL(sk_send_sigurg);
2163EXPORT_SYMBOL(sock_alloc_send_skb);
2164EXPORT_SYMBOL(sock_init_data);
2165EXPORT_SYMBOL(sock_kfree_s);
2166EXPORT_SYMBOL(sock_kmalloc);
2167EXPORT_SYMBOL(sock_no_accept);
2168EXPORT_SYMBOL(sock_no_bind);
2169EXPORT_SYMBOL(sock_no_connect);
2170EXPORT_SYMBOL(sock_no_getname);
2171EXPORT_SYMBOL(sock_no_getsockopt);
2172EXPORT_SYMBOL(sock_no_ioctl);
2173EXPORT_SYMBOL(sock_no_listen);
2174EXPORT_SYMBOL(sock_no_mmap);
2175EXPORT_SYMBOL(sock_no_poll);
2176EXPORT_SYMBOL(sock_no_recvmsg);
2177EXPORT_SYMBOL(sock_no_sendmsg);
2178EXPORT_SYMBOL(sock_no_sendpage);
2179EXPORT_SYMBOL(sock_no_setsockopt);
2180EXPORT_SYMBOL(sock_no_shutdown);
2181EXPORT_SYMBOL(sock_no_socketpair);
2182EXPORT_SYMBOL(sock_rfree);
2183EXPORT_SYMBOL(sock_setsockopt);
2184EXPORT_SYMBOL(sock_wfree);
2185EXPORT_SYMBOL(sock_wmalloc);
2186EXPORT_SYMBOL(sock_i_uid);
2187EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188EXPORT_SYMBOL(sysctl_optmem_max);