// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen :	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg :	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi :	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi :	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
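
/* For context, a minimal userspace sketch (illustrative only, not part of
 * this file) of the SO_REUSEPORT binding that udp_reuseport_add_sock()
 * services on the kernel side. Each socket that sets SO_REUSEPORT before
 * bind() and matches on address, port, and owning uid joins the same
 * reuseport group, and incoming datagrams are then balanced across it:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int one = 1;
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(7777),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Repeating the same sequence in N processes yields N sockets in one group.
 */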

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
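			/* Worked illustration of the invariant above (added
			 * note, not from the original comments): with 256
			 * hash slots, mask + 1 == 256 and rand becomes an
			 * odd multiple of 256, e.g. 3 * 256 == 768. Every
			 * candidate snum == first + k * 768 (mod 65536)
			 * stays congruent to first modulo 256, i.e. in this
			 * same hash chain, and because the multiplier 3 is
			 * odd, gcd(768, 65536) == 256, so the walk visits
			 * all 65536 / 256 == 256 ports of that chain exactly
			 * once before snum wraps back to first.
			 */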
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2 &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
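
/* A minimal userspace sketch (illustrative only, not part of this file) of
 * what drives the !snum path above: binding to port 0 asks the kernel to
 * pick a free ephemeral port via udp_lib_get_port(), and getsockname()
 * reveals the choice:
 *
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *		.sin_port = 0,			// let the kernel choose
 *	};
 *	socklen_t alen = sizeof(addr);
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	getsockname(fd, (struct sockaddr *)&addr, &alen);
 *	printf("bound to port %u\n", ntohs(addr.sin_port));
 */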

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	score += 4;

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			if (sk->sk_reuseport &&
			    sk->sk_state != TCP_ESTABLISHED) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result && !reuseport_has_conns(sk, false))
					return result;
			}
			badness = score;
			result = sk;
		}
	}
	return result;
}

/* UDP is nearly always wildcarded out the wazoo, so it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!result) {
		hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];

		result = udp4_lib_lookup2(net, saddr, sport,
					  htonl(INADDR_ANY), hnum, dif, sdif,
					  hslot2, skb);
	}
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
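
/* Illustrative note (added, not from the original comments): the lookup
 * above is two-tiered. The first pass hashes on (net, daddr, port) and so
 * finds sockets bound to the specific local address; only if that slot
 * yields nothing is the (net, INADDR_ANY, port) slot scanned for wildcard
 * binds. compute_score() then prefers the most specific match, e.g. a
 * connected four-tuple socket outscores an address-only bind.
 */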

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb, u32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header. We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
						  info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK. Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * udp4_hwcsum  -  handle outgoing HW checksumming
 * @skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 * @src:	source IP address
 * @dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW checksum won't work when there are two or more
		 * fragments on the socket, so the csums of all the
		 * sk_buffs have to be combined here.
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
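
/* For reference (summarized from RFC 768 / RFC 1071, not from the original
 * comments in this file): csum_tcpudp_magic() folds the UDP pseudo-header
 * into the checksum, i.e. roughly
 *
 *	check = ~fold(sum(saddr) + sum(daddr) + htons(IPPROTO_UDP) +
 *		      htons(len) + sum(UDP header and payload))
 *
 * in ones-complement arithmetic. A transmitted checksum of 0 means
 * "no checksum" for IPv4 UDP, which is why a computed 0 is rewritten to
 * CSUM_MANGLED_0 (all ones) above.
 */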

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
		goto csum_partial;
	}

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
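
/* A minimal userspace sketch (illustrative only, not part of this file) of
 * the corking path that udp_push_pending_frames() completes: with UDP_CORK
 * set, consecutive sends accumulate in one pending datagram, and clearing
 * the option pushes everything out as a single UDP packet. Assumes fd is a
 * connected UDP socket:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, "hello ", 6, 0);
 *	send(fd, "world", 5, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 * The same effect is available per call by passing MSG_MORE on all but the
 * last send().
 */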

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);
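
/* A minimal userspace sketch (illustrative only, not part of this file) of
 * the UDP_SEGMENT control message parsed above: one large send is cut into
 * gso_size-sized datagrams by the GSO path in udp_send_skb(). A per-socket
 * equivalent is setsockopt(fd, SOL_UDP, UDP_SEGMENT, ...). Assumes fd is a
 * connected UDP socket:
 *
 *	char buf[3000];			// becomes three 1000-byte datagrams
 *	__u16 gso_size = 1000;
 *	union {
 *		char buf[CMSG_SPACE(sizeof(__u16))];
 *		struct cmsghdr align;
 *	} ctl = { 0 };
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl.buf, .msg_controllen = sizeof(ctl.buf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_UDP;
 *	cm->cmsg_type = UDP_SEGMENT;
 *	cm->cmsg_len = CMSG_LEN(sizeof(gso_size));
 *	memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));
 *	sendmsg(fd, &msg, 0);
 */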

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		 * Route will not be used if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = up->gso_size;

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast
		 * and uc_index is set. oif is most likely set
1114 * by sk_bound_dev_if. If uc_index != oif check if the
1115 * oif is an L3 master and uc_index is an L3 slave.
1116 * If so, we want to allow the send using the uc_index.
1117 */
1118 if (ipc.oif != inet->uc_index &&
1119 ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
1120 inet->uc_index)) {
1121 ipc.oif = inet->uc_index;
1122 }
1123 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001124
1125 if (connected)
Eric Dumazetc482c562009-07-17 00:26:32 +00001126 rt = (struct rtable *)sk_dst_check(sk, 0);
David S. Millerdb8dac22008-03-06 16:22:02 -08001127
Ian Morris51456b22015-04-03 09:17:26 +01001128 if (!rt) {
Pavel Emelyanov84a3aa02008-07-16 20:19:08 -07001129 struct net *net = sock_net(sk);
David Ahern9a24abf2015-08-13 14:59:03 -06001130 __u8 flow_flags = inet_sk_flowi_flags(sk);
Pavel Emelyanov84a3aa02008-07-16 20:19:08 -07001131
David S. Millere4749952011-05-08 16:38:45 -07001132 fl4 = &fl4_stack;
David Ahern9a24abf2015-08-13 14:59:03 -06001133
Willem de Bruijnc6af0c22019-09-11 15:50:51 -04001134 flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos,
David S. Millerc0951cb2011-03-31 04:54:27 -07001135 RT_SCOPE_UNIVERSE, sk->sk_protocol,
David Ahern9a24abf2015-08-13 14:59:03 -06001136 flow_flags,
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09001137 faddr, saddr, dport, inet->inet_sport,
1138 sk->sk_uid);
David S. Millerc0951cb2011-03-31 04:54:27 -07001139
David S. Millere4749952011-05-08 16:38:45 -07001140 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
1141 rt = ip_route_output_flow(net, fl4, sk);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001142 if (IS_ERR(rt)) {
1143 err = PTR_ERR(rt);
David S. Miller06dc94b2011-03-03 10:38:01 -08001144 rt = NULL;
David S. Millerdb8dac22008-03-06 16:22:02 -08001145 if (err == -ENETUNREACH)
Eric Dumazetf1d8cba2013-11-28 09:51:22 -08001146 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
David S. Millerdb8dac22008-03-06 16:22:02 -08001147 goto out;
1148 }
1149
1150 err = -EACCES;
1151 if ((rt->rt_flags & RTCF_BROADCAST) &&
1152 !sock_flag(sk, SOCK_BROADCAST))
1153 goto out;
1154 if (connected)
Changli Gaod8d1f302010-06-10 23:31:35 -07001155 sk_dst_set(sk, dst_clone(&rt->dst));
David S. Millerdb8dac22008-03-06 16:22:02 -08001156 }
1157
1158 if (msg->msg_flags&MSG_CONFIRM)
1159 goto do_confirm;
1160back_from_confirm:
1161
David S. Millere4749952011-05-08 16:38:45 -07001162 saddr = fl4->saddr;
David S. Millerdb8dac22008-03-06 16:22:02 -08001163 if (!ipc.addr)
David S. Millere4749952011-05-08 16:38:45 -07001164 daddr = ipc.addr = fl4->daddr;
David S. Millerdb8dac22008-03-06 16:22:02 -08001165
Herbert Xu903ab862011-03-01 02:36:48 +00001166 /* Lockless fast path for the non-corking case. */
1167 if (!corkreq) {
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001168 struct inet_cork cork;
1169
Al Virof69e6d12014-11-24 13:23:40 -05001170 skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
Herbert Xu903ab862011-03-01 02:36:48 +00001171 sizeof(struct udphdr), &ipc, &rt,
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001172 &cork, msg->msg_flags);
Herbert Xu903ab862011-03-01 02:36:48 +00001173 err = PTR_ERR(skb);
YOSHIFUJI Hideaki / 吉藤英明50c3a482013-01-22 06:32:49 +00001174 if (!IS_ERR_OR_NULL(skb))
Willem de Bruijnbec1f6f2018-04-26 13:42:17 -04001175 err = udp_send_skb(skb, fl4, &cork);
Herbert Xu903ab862011-03-01 02:36:48 +00001176 goto out;
1177 }
1178
David S. Millerdb8dac22008-03-06 16:22:02 -08001179 lock_sock(sk);
1180 if (unlikely(up->pending)) {
1181 /* The socket is already corked while preparing it. */
1182 /* ... which is an evident application bug. --ANK */
1183 release_sock(sk);
1184
Matteo Croce197df022017-10-19 14:22:17 +02001185 net_dbg_ratelimited("socket already corked\n");
David S. Millerdb8dac22008-03-06 16:22:02 -08001186 err = -EINVAL;
1187 goto out;
1188 }
 1189	/*
 1190	 * Now cork the socket to hold pending data.
 1191	 */
David S. Millerb6f21b22011-03-12 02:09:18 -05001192 fl4 = &inet->cork.fl.u.ip4;
1193 fl4->daddr = daddr;
1194 fl4->saddr = saddr;
David S. Miller9cce96d2011-03-12 03:00:33 -05001195 fl4->fl4_dport = dport;
1196 fl4->fl4_sport = inet->inet_sport;
David S. Millerdb8dac22008-03-06 16:22:02 -08001197 up->pending = AF_INET;
1198
1199do_append_data:
1200 up->len += ulen;
Al Virof69e6d12014-11-24 13:23:40 -05001201 err = ip_append_data(sk, fl4, getfrag, msg, ulen,
David S. Millerf5fca602011-05-08 17:24:10 -07001202 sizeof(struct udphdr), &ipc, &rt,
1203 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
David S. Millerdb8dac22008-03-06 16:22:02 -08001204 if (err)
1205 udp_flush_pending_frames(sk);
1206 else if (!corkreq)
1207 err = udp_push_pending_frames(sk);
1208 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1209 up->pending = 0;
1210 release_sock(sk);
1211
1212out:
1213 ip_rt_put(rt);
Andrey Ignatov1b970132018-05-10 10:59:34 -07001214out_free:
David S. Millerdb8dac22008-03-06 16:22:02 -08001215 if (free)
1216 kfree(ipc.opt);
1217 if (!err)
1218 return len;
1219 /*
1220 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1221 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1222 * we don't have a good statistic (IpOutDiscards but it can be too many
 1223	 * things). We could add another stat, but at least for now that
1224 * seems like overkill.
1225 */
1226 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
Eric Dumazet6aef70a2016-04-27 16:44:27 -07001227 UDP_INC_STATS(sock_net(sk),
1228 UDP_MIB_SNDBUFERRORS, is_udplite);
David S. Millerdb8dac22008-03-06 16:22:02 -08001229 }
1230 return err;
1231
1232do_confirm:
Julian Anastasov0dec8792017-02-06 23:14:16 +02001233 if (msg->msg_flags & MSG_PROBE)
1234 dst_confirm_neigh(&rt->dst, &fl4->daddr);
David S. Millerdb8dac22008-03-06 16:22:02 -08001235 if (!(msg->msg_flags&MSG_PROBE) || len)
1236 goto back_from_confirm;
1237 err = 0;
1238 goto out;
1239}
Eric Dumazetc482c562009-07-17 00:26:32 +00001240EXPORT_SYMBOL(udp_sendmsg);
David S. Millerdb8dac22008-03-06 16:22:02 -08001241
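/* Illustrative userspace sketch (not part of this file): the corking
 * slow path above runs when an application sets the UDP_CORK socket
 * option or passes MSG_MORE, so several sends are merged into a single
 * datagram until the cork is released.  Assumes fd is a connected UDP
 * socket.
 */
#if 0	/* example only, never compiled into the kernel */
#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_CORK */
#include <sys/socket.h>

static void send_one_datagram_in_pieces(int fd)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "part1-", 6, 0);	/* appended, not yet transmitted */
	send(fd, "part2", 5, 0);	/* still pending */
	/* uncorking pushes the pending frames out as one datagram */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif
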
1242int udp_sendpage(struct sock *sk, struct page *page, int offset,
1243 size_t size, int flags)
1244{
David S. Millerf5fca602011-05-08 17:24:10 -07001245 struct inet_sock *inet = inet_sk(sk);
David S. Millerdb8dac22008-03-06 16:22:02 -08001246 struct udp_sock *up = udp_sk(sk);
1247 int ret;
1248
Shawn Landdend3f7d562013-11-24 22:36:28 -08001249 if (flags & MSG_SENDPAGE_NOTLAST)
1250 flags |= MSG_MORE;
1251
David S. Millerdb8dac22008-03-06 16:22:02 -08001252 if (!up->pending) {
1253 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
1254
 1255		/* Call udp_sendmsg to specify the destination address, which
 1256		 * the sendpage interface can't pass.
1257 * This will succeed only when the socket is connected.
1258 */
Ying Xue1b784142015-03-02 15:37:48 +08001259 ret = udp_sendmsg(sk, &msg, 0);
David S. Millerdb8dac22008-03-06 16:22:02 -08001260 if (ret < 0)
1261 return ret;
1262 }
1263
1264 lock_sock(sk);
1265
1266 if (unlikely(!up->pending)) {
1267 release_sock(sk);
1268
Matteo Croce197df022017-10-19 14:22:17 +02001269 net_dbg_ratelimited("cork failed\n");
David S. Millerdb8dac22008-03-06 16:22:02 -08001270 return -EINVAL;
1271 }
1272
David S. Millerf5fca602011-05-08 17:24:10 -07001273 ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
1274 page, offset, size, flags);
David S. Millerdb8dac22008-03-06 16:22:02 -08001275 if (ret == -EOPNOTSUPP) {
1276 release_sock(sk);
1277 return sock_no_sendpage(sk->sk_socket, page, offset,
1278 size, flags);
1279 }
1280 if (ret < 0) {
1281 udp_flush_pending_frames(sk);
1282 goto out;
1283 }
1284
1285 up->len += size;
1286 if (!(up->corkflag || (flags&MSG_MORE)))
1287 ret = udp_push_pending_frames(sk);
1288 if (!ret)
1289 ret = size;
1290out:
1291 release_sock(sk);
1292 return ret;
1293}
1294
Paolo Abenidce45512017-07-25 17:57:47 +02001295#define UDP_SKB_IS_STATELESS 0x80000000
1296
Paolo Abenib65ac442017-06-12 11:23:43 +02001297static void udp_set_dev_scratch(struct sk_buff *skb)
1298{
Paolo Abenidce45512017-07-25 17:57:47 +02001299 struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
Paolo Abenib65ac442017-06-12 11:23:43 +02001300
1301 BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
Paolo Abenidce45512017-07-25 17:57:47 +02001302 scratch->_tsize_state = skb->truesize;
1303#if BITS_PER_LONG == 64
Paolo Abenib65ac442017-06-12 11:23:43 +02001304 scratch->len = skb->len;
1305 scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
1306 scratch->is_linear = !skb_is_nonlinear(skb);
Paolo Abenib65ac442017-06-12 11:23:43 +02001307#endif
Paolo Abeni3bdefdf2017-08-03 18:07:08 +02001308	/* all head states except sp (dst, sk, nf) are always cleared by
1309 * udp_rcv() and we need to preserve secpath, if present, to eventually
1310 * process IP_CMSG_PASSSEC at recvmsg() time
1311 */
1312 if (likely(!skb_sec_path(skb)))
Paolo Abenidce45512017-07-25 17:57:47 +02001313 scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
1314}
1315
1316static int udp_skb_truesize(struct sk_buff *skb)
1317{
1318 return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
1319}
1320
1321static bool udp_skb_has_head_state(struct sk_buff *skb)
1322{
1323 return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
1324}
Paolo Abenib65ac442017-06-12 11:23:43 +02001325
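/* Layout sketch for the scratch helpers above (illustrative): the
 * scratch area overlays skb->dev_scratch, a single long, so the hot
 * receive path can answer "how much memory does this skb account for?"
 * and "does it still carry head state?" without touching cold skb
 * fields:
 *
 *	_tsize_state: [ UDP_SKB_IS_STATELESS | ...... truesize ...... ]
 *	                      bit 31                  bits 30..0
 *
 * On 64-bit, len, csum_unnecessary and is_linear are cached alongside,
 * sparing recvmsg() extra cache line misses on the skb itself.
 */
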
Paolo Abeni7c13f972016-11-04 11:28:59 +01001326/* fully reclaim rmem/fwd memory allocated for skb */
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001327static void udp_rmem_release(struct sock *sk, int size, int partial,
1328 bool rx_queue_lock_held)
Paolo Abenif970bd92016-10-21 13:55:46 +02001329{
Eric Dumazet6b229cf2016-12-08 11:41:56 -08001330 struct udp_sock *up = udp_sk(sk);
Paolo Abeni2276f582017-05-16 11:20:14 +02001331 struct sk_buff_head *sk_queue;
Paolo Abenif970bd92016-10-21 13:55:46 +02001332 int amt;
1333
Eric Dumazet6b229cf2016-12-08 11:41:56 -08001334 if (likely(partial)) {
1335 up->forward_deficit += size;
1336 size = up->forward_deficit;
Paolo Abeni0d4a6602017-09-19 12:11:43 +02001337 if (size < (sk->sk_rcvbuf >> 2))
Eric Dumazet6b229cf2016-12-08 11:41:56 -08001338 return;
1339 } else {
1340 size += up->forward_deficit;
1341 }
1342 up->forward_deficit = 0;
1343
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001344 /* acquire the sk_receive_queue for fwd allocated memory scheduling,
 1345	 * if the caller doesn't hold it already
1346 */
Paolo Abeni2276f582017-05-16 11:20:14 +02001347 sk_queue = &sk->sk_receive_queue;
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001348 if (!rx_queue_lock_held)
1349 spin_lock(&sk_queue->lock);
 1350
Paolo Abenif970bd92016-10-21 13:55:46 +02001352 sk->sk_forward_alloc += size;
1353 amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
1354 sk->sk_forward_alloc -= amt;
Paolo Abenif970bd92016-10-21 13:55:46 +02001355
1356 if (amt)
1357 __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
Eric Dumazet02ab0d12016-12-08 11:41:57 -08001358
1359 atomic_sub(size, &sk->sk_rmem_alloc);
Paolo Abeni2276f582017-05-16 11:20:14 +02001360
1361 /* this can save us from acquiring the rx queue lock on next receive */
1362 skb_queue_splice_tail_init(sk_queue, &up->reader_queue);
1363
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001364 if (!rx_queue_lock_held)
1365 spin_unlock(&sk_queue->lock);
Paolo Abenif970bd92016-10-21 13:55:46 +02001366}
1367
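/* Worked example for the partial-release batching above (illustrative
 * numbers): with sk_rcvbuf at its common 208 KiB default, dequeued
 * truesize only accumulates in up->forward_deficit until it crosses
 * sk_rcvbuf >> 2 (52 KiB here), so about 26 skbs of 2 KiB truesize are
 * returned to the memory accounting in one batch, amortizing the
 * atomic ops and queue locking over many packets.
 */
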
Paolo Abeni2276f582017-05-16 11:20:14 +02001368/* Note: called with reader_queue.lock held.
Eric Dumazetc84d9492016-12-08 11:41:55 -08001369 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
1370 * This avoids a cache line miss while receive_queue lock is held.
1371 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
1372 */
Paolo Abeni7c13f972016-11-04 11:28:59 +01001373void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
Paolo Abenif970bd92016-10-21 13:55:46 +02001374{
Paolo Abenib65ac442017-06-12 11:23:43 +02001375 prefetch(&skb->data);
1376 udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
Paolo Abenif970bd92016-10-21 13:55:46 +02001377}
Paolo Abeni7c13f972016-11-04 11:28:59 +01001378EXPORT_SYMBOL(udp_skb_destructor);
Paolo Abenif970bd92016-10-21 13:55:46 +02001379
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001380/* as above, but the caller held the rx queue lock, too */
Colin Ian King64f51022017-05-17 09:50:36 +01001381static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001382{
Paolo Abenib65ac442017-06-12 11:23:43 +02001383 prefetch(&skb->data);
1384 udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001385}
1386
Eric Dumazet4b272752016-12-08 11:41:54 -08001387/* The idea of busylocks is to let producers grab an extra spinlock
 1388 * to relieve pressure on the receive_queue spinlock shared with the consumer.
 1389 * Under flood, this means that only one producer can be in line
 1390 * trying to acquire the receive_queue spinlock.
 1391 * These busylocks are allocated in a per-cpu manner, instead of a
 1392 * per-socket one (that would consume a cache line per socket).
1393 */
1394static int udp_busylocks_log __read_mostly;
1395static spinlock_t *udp_busylocks __read_mostly;
1396
1397static spinlock_t *busylock_acquire(void *ptr)
1398{
1399 spinlock_t *busy;
1400
1401 busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
1402 spin_lock(busy);
1403 return busy;
1404}
1405
1406static void busylock_release(spinlock_t *busy)
1407{
1408 if (busy)
1409 spin_unlock(busy);
1410}
1411
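/* The busylock array is sized and allocated at boot elsewhere in this
 * file; a minimal sketch of an equivalent initialization (illustrative,
 * and the 2-locks-per-CPU scaling is an assumption, not the file's
 * exact policy):
 */
#if 0	/* example only */
static void __init busylocks_init_sketch(void)
{
	unsigned int i, nr = 1;

	while (nr < 2 * num_possible_cpus())	/* power of two for hash_ptr() */
		nr <<= 1;
	udp_busylocks_log = ilog2(nr);
	udp_busylocks = kmalloc_array(nr, sizeof(spinlock_t), GFP_KERNEL);
	for (i = 0; i < nr; i++)
		spin_lock_init(&udp_busylocks[i]);
}
#endif
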
Paolo Abenif970bd92016-10-21 13:55:46 +02001412int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
1413{
1414 struct sk_buff_head *list = &sk->sk_receive_queue;
1415 int rmem, delta, amt, err = -ENOMEM;
Eric Dumazet4b272752016-12-08 11:41:54 -08001416 spinlock_t *busy = NULL;
Eric Dumazetc8c8b122016-12-07 09:19:33 -08001417 int size;
Paolo Abenif970bd92016-10-21 13:55:46 +02001418
1419 /* try to avoid the costly atomic add/sub pair when the receive
1420 * queue is full; always allow at least a packet
1421 */
1422 rmem = atomic_read(&sk->sk_rmem_alloc);
Paolo Abeni363dc732016-12-02 17:35:49 +01001423 if (rmem > sk->sk_rcvbuf)
Paolo Abenif970bd92016-10-21 13:55:46 +02001424 goto drop;
1425
Eric Dumazetc8c8b122016-12-07 09:19:33 -08001426	/* Under memory pressure, it helps udp_recvmsg() to be handed
 1427	 * linear skbs:
1428 * - Reduce memory overhead and thus increase receive queue capacity
1429 * - Less cache line misses at copyout() time
1430 * - Less work at consume_skb() (less alien page frag freeing)
1431 */
Eric Dumazet4b272752016-12-08 11:41:54 -08001432 if (rmem > (sk->sk_rcvbuf >> 1)) {
Eric Dumazetc8c8b122016-12-07 09:19:33 -08001433 skb_condense(skb);
Eric Dumazet4b272752016-12-08 11:41:54 -08001434
1435 busy = busylock_acquire(sk);
1436 }
Eric Dumazetc8c8b122016-12-07 09:19:33 -08001437 size = skb->truesize;
Paolo Abenib65ac442017-06-12 11:23:43 +02001438 udp_set_dev_scratch(skb);
Eric Dumazetc8c8b122016-12-07 09:19:33 -08001439
Paolo Abenif970bd92016-10-21 13:55:46 +02001440 /* we drop only if the receive buf is full and the receive
1441 * queue contains some other skb
1442 */
1443 rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
Paolo Abeni363dc732016-12-02 17:35:49 +01001444 if (rmem > (size + sk->sk_rcvbuf))
Paolo Abenif970bd92016-10-21 13:55:46 +02001445 goto uncharge_drop;
1446
1447 spin_lock(&list->lock);
1448 if (size >= sk->sk_forward_alloc) {
1449 amt = sk_mem_pages(size);
1450 delta = amt << SK_MEM_QUANTUM_SHIFT;
1451 if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
1452 err = -ENOBUFS;
1453 spin_unlock(&list->lock);
1454 goto uncharge_drop;
1455 }
1456
1457 sk->sk_forward_alloc += delta;
1458 }
1459
1460 sk->sk_forward_alloc -= size;
1461
Paolo Abeni7c13f972016-11-04 11:28:59 +01001462	/* no need to set up a destructor, we will explicitly release the
1463 * forward allocated memory on dequeue
1464 */
Paolo Abenif970bd92016-10-21 13:55:46 +02001465 sock_skb_set_dropcount(sk, skb);
1466
1467 __skb_queue_tail(list, skb);
1468 spin_unlock(&list->lock);
1469
1470 if (!sock_flag(sk, SOCK_DEAD))
1471 sk->sk_data_ready(sk);
1472
Eric Dumazet4b272752016-12-08 11:41:54 -08001473 busylock_release(busy);
Paolo Abenif970bd92016-10-21 13:55:46 +02001474 return 0;
1475
1476uncharge_drop:
1477 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1478
1479drop:
1480 atomic_inc(&sk->sk_drops);
Eric Dumazet4b272752016-12-08 11:41:54 -08001481 busylock_release(busy);
Paolo Abenif970bd92016-10-21 13:55:46 +02001482 return err;
1483}
1484EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
1485
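/* Worked example for the accounting above (illustrative, assuming
 * SK_MEM_QUANTUM == PAGE_SIZE == 4096): an skb of truesize 2304 arriving
 * with sk_forward_alloc == 0 takes the slow branch; sk_mem_pages(2304)
 * is 1, so delta = 4096 bytes are raised from the protocol memory
 * accounting, sk_forward_alloc becomes 4096 and then, after charging
 * the skb, 1792 -- enough for the next small skb to skip the slow path.
 */
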
Paolo Abenic915fe12016-11-15 16:37:53 +01001486void udp_destruct_sock(struct sock *sk)
Paolo Abenif970bd92016-10-21 13:55:46 +02001487{
 1488	/* completely reclaim the forward allocated memory */
Paolo Abeni2276f582017-05-16 11:20:14 +02001489 struct udp_sock *up = udp_sk(sk);
Paolo Abeni7c13f972016-11-04 11:28:59 +01001490 unsigned int total = 0;
1491 struct sk_buff *skb;
1492
Paolo Abeni2276f582017-05-16 11:20:14 +02001493 skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
1494 while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
Paolo Abeni7c13f972016-11-04 11:28:59 +01001495 total += skb->truesize;
1496 kfree_skb(skb);
1497 }
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001498 udp_rmem_release(sk, total, 0, true);
Paolo Abeni7c13f972016-11-04 11:28:59 +01001499
Paolo Abenif970bd92016-10-21 13:55:46 +02001500 inet_sock_destruct(sk);
1501}
Paolo Abenic915fe12016-11-15 16:37:53 +01001502EXPORT_SYMBOL_GPL(udp_destruct_sock);
Paolo Abenif970bd92016-10-21 13:55:46 +02001503
1504int udp_init_sock(struct sock *sk)
1505{
Paolo Abeni2276f582017-05-16 11:20:14 +02001506 skb_queue_head_init(&udp_sk(sk)->reader_queue);
Paolo Abenif970bd92016-10-21 13:55:46 +02001507 sk->sk_destruct = udp_destruct_sock;
1508 return 0;
1509}
1510EXPORT_SYMBOL_GPL(udp_init_sock);
1511
1512void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
1513{
1514 if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
1515 bool slow = lock_sock_fast(sk);
1516
1517 sk_peek_offset_bwd(sk, len);
1518 unlock_sock_fast(sk, slow);
1519 }
Paolo Abeni0a463c72017-06-12 11:23:42 +02001520
Paolo Abenica2c1412017-09-06 14:44:36 +02001521 if (!skb_unref(skb))
1522 return;
1523
Paolo Abenidce45512017-07-25 17:57:47 +02001524 /* In the more common cases we cleared the head states previously,
1525 * see __udp_queue_rcv_skb().
Paolo Abeni0ddf3fb2017-07-18 11:57:55 +02001526 */
Paolo Abenidce45512017-07-25 17:57:47 +02001527 if (unlikely(udp_skb_has_head_state(skb)))
Paolo Abeni0ddf3fb2017-07-18 11:57:55 +02001528 skb_release_head_state(skb);
Paolo Abenica2c1412017-09-06 14:44:36 +02001529 __consume_stateless_skb(skb);
Paolo Abenif970bd92016-10-21 13:55:46 +02001530}
1531EXPORT_SYMBOL_GPL(skb_consume_udp);
1532
Paolo Abeni2276f582017-05-16 11:20:14 +02001533static struct sk_buff *__first_packet_length(struct sock *sk,
1534 struct sk_buff_head *rcvq,
1535 int *total)
1536{
1537 struct sk_buff *skb;
1538
Paolo Abeni9bd780f2017-06-23 14:19:51 +02001539 while ((skb = skb_peek(rcvq)) != NULL) {
1540 if (udp_lib_checksum_complete(skb)) {
1541 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
1542 IS_UDPLITE(sk));
1543 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
1544 IS_UDPLITE(sk));
1545 atomic_inc(&sk->sk_drops);
1546 __skb_unlink(skb, rcvq);
1547 *total += skb->truesize;
1548 kfree_skb(skb);
1549 } else {
1550 /* the csum related bits could be changed, refresh
1551 * the scratch area
1552 */
1553 udp_set_dev_scratch(skb);
1554 break;
1555 }
Paolo Abeni2276f582017-05-16 11:20:14 +02001556 }
1557 return skb;
1558}
1559
Eric Dumazet85584672009-10-09 04:43:40 +00001560/**
1561 * first_packet_length - return length of first packet in receive queue
1562 * @sk: socket
1563 *
1564 * Drops all bad checksum frames, until a valid one is found.
Eric Dumazete83c6742016-08-23 13:59:33 -07001565 * Returns the length of the found skb, or -1 if none is found.
Eric Dumazet85584672009-10-09 04:43:40 +00001566 */
Eric Dumazete83c6742016-08-23 13:59:33 -07001567static int first_packet_length(struct sock *sk)
Eric Dumazet85584672009-10-09 04:43:40 +00001568{
Paolo Abeni2276f582017-05-16 11:20:14 +02001569 struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
1570 struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
Eric Dumazet85584672009-10-09 04:43:40 +00001571 struct sk_buff *skb;
Paolo Abeni7c13f972016-11-04 11:28:59 +01001572 int total = 0;
Eric Dumazete83c6742016-08-23 13:59:33 -07001573 int res;
Eric Dumazet85584672009-10-09 04:43:40 +00001574
Eric Dumazet85584672009-10-09 04:43:40 +00001575 spin_lock_bh(&rcvq->lock);
Paolo Abeni2276f582017-05-16 11:20:14 +02001576 skb = __first_packet_length(sk, rcvq, &total);
1577 if (!skb && !skb_queue_empty(sk_queue)) {
1578 spin_lock(&sk_queue->lock);
1579 skb_queue_splice_tail_init(sk_queue, rcvq);
1580 spin_unlock(&sk_queue->lock);
1581
1582 skb = __first_packet_length(sk, rcvq, &total);
Eric Dumazet85584672009-10-09 04:43:40 +00001583 }
Eric Dumazete83c6742016-08-23 13:59:33 -07001584 res = skb ? skb->len : -1;
Paolo Abeni7c13f972016-11-04 11:28:59 +01001585 if (total)
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001586 udp_rmem_release(sk, total, 1, false);
Eric Dumazet85584672009-10-09 04:43:40 +00001587 spin_unlock_bh(&rcvq->lock);
Eric Dumazet85584672009-10-09 04:43:40 +00001588 return res;
1589}
1590
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591/*
1592 * IOCTL requests applicable to the UDP protocol
1593 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001594
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1596{
Stephen Hemminger6516c652007-03-08 20:41:55 -08001597 switch (cmd) {
1598 case SIOCOUTQ:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 {
Eric Dumazet31e6d362009-06-17 19:05:41 -07001600 int amount = sk_wmem_alloc_get(sk);
1601
Stephen Hemminger6516c652007-03-08 20:41:55 -08001602 return put_user(amount, (int __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 }
Stephen Hemminger6516c652007-03-08 20:41:55 -08001604
1605 case SIOCINQ:
1606 {
Eric Dumazete83c6742016-08-23 13:59:33 -07001607 int amount = max_t(int, 0, first_packet_length(sk));
Stephen Hemminger6516c652007-03-08 20:41:55 -08001608
Stephen Hemminger6516c652007-03-08 20:41:55 -08001609 return put_user(amount, (int __user *)arg);
1610 }
1611
1612 default:
1613 return -ENOIOCTLCMD;
1614 }
1615
1616 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617}
Eric Dumazetc482c562009-07-17 00:26:32 +00001618EXPORT_SYMBOL(udp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
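/* Illustrative userspace sketch (not kernel code): SIOCINQ reports the
 * payload length of the next pending datagram (via first_packet_length()
 * above, clamped to 0 when the queue is empty), and SIOCOUTQ the amount
 * of queued but not yet sent data.
 */
#if 0	/* example only */
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static void query_udp_queues(int fd)
{
	int inq = 0, outq = 0;

	ioctl(fd, SIOCINQ, &inq);	/* next datagram's length */
	ioctl(fd, SIOCOUTQ, &outq);	/* bytes still unsent */
}
#endif
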
Paolo Abeni2276f582017-05-16 11:20:14 +02001620struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
Paolo Abenifd69c392019-04-08 10:15:59 +02001621 int noblock, int *off, int *err)
Paolo Abeni2276f582017-05-16 11:20:14 +02001622{
1623 struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
1624 struct sk_buff_head *queue;
1625 struct sk_buff *last;
1626 long timeo;
1627 int error;
1628
1629 queue = &udp_sk(sk)->reader_queue;
1630 flags |= noblock ? MSG_DONTWAIT : 0;
1631 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1632 do {
1633 struct sk_buff *skb;
1634
1635 error = sock_error(sk);
1636 if (error)
1637 break;
1638
1639 error = -EAGAIN;
Paolo Abeni2276f582017-05-16 11:20:14 +02001640 do {
Paolo Abeni2276f582017-05-16 11:20:14 +02001641 spin_lock_bh(&queue->lock);
1642 skb = __skb_try_recv_from_queue(sk, queue, flags,
1643 udp_skb_destructor,
Paolo Abenifd69c392019-04-08 10:15:59 +02001644 off, err, &last);
Paolo Abeni2276f582017-05-16 11:20:14 +02001645 if (skb) {
1646 spin_unlock_bh(&queue->lock);
Paolo Abeni2276f582017-05-16 11:20:14 +02001647 return skb;
1648 }
1649
1650 if (skb_queue_empty(sk_queue)) {
1651 spin_unlock_bh(&queue->lock);
1652 goto busy_check;
1653 }
1654
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001655		/* refill the reader queue and walk it again;
1656 * keep both queues locked to avoid re-acquiring
1657 * the sk_receive_queue lock if fwd memory scheduling
1658 * is needed.
1659 */
Paolo Abeni2276f582017-05-16 11:20:14 +02001660 spin_lock(&sk_queue->lock);
1661 skb_queue_splice_tail_init(sk_queue, queue);
Paolo Abeni2276f582017-05-16 11:20:14 +02001662
1663 skb = __skb_try_recv_from_queue(sk, queue, flags,
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001664 udp_skb_dtor_locked,
Paolo Abenifd69c392019-04-08 10:15:59 +02001665 off, err, &last);
Paolo Abeni6dfb4362017-05-16 11:20:15 +02001666 spin_unlock(&sk_queue->lock);
Paolo Abeni2276f582017-05-16 11:20:14 +02001667 spin_unlock_bh(&queue->lock);
Andrey Vaginde321ed2017-05-17 11:39:05 -07001668 if (skb)
Paolo Abeni2276f582017-05-16 11:20:14 +02001669 return skb;
Paolo Abeni2276f582017-05-16 11:20:14 +02001670
1671busy_check:
1672 if (!sk_can_busy_loop(sk))
1673 break;
1674
1675 sk_busy_loop(sk, flags & MSG_DONTWAIT);
1676 } while (!skb_queue_empty(sk_queue));
1677
1678 /* sk_queue is empty, reader_queue may contain peeked packets */
1679 } while (timeo &&
1680 !__skb_wait_for_more_packets(sk, &error, &timeo,
1681 (struct sk_buff *)sk_queue));
1682
1683 *err = error;
1684 return NULL;
1685}
Jiri Kosina7e823642018-10-04 13:37:32 +02001686EXPORT_SYMBOL(__skb_recv_udp);
Paolo Abeni2276f582017-05-16 11:20:14 +02001687
David S. Millerdb8dac22008-03-06 16:22:02 -08001688/*
 1689 * This should be easy: if there is something there, we
 1690 * return it; otherwise we block.
1691 */
1692
Ying Xue1b784142015-03-02 15:37:48 +08001693int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
1694 int flags, int *addr_len)
David S. Millerdb8dac22008-03-06 16:22:02 -08001695{
1696 struct inet_sock *inet = inet_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01001697 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
David S. Millerdb8dac22008-03-06 16:22:02 -08001698 struct sk_buff *skb;
David S. Miller59c2cda2011-12-01 14:12:55 -05001699 unsigned int ulen, copied;
Paolo Abenifd69c392019-04-08 10:15:59 +02001700 int off, err, peeking = flags & MSG_PEEK;
David S. Millerdb8dac22008-03-06 16:22:02 -08001701 int is_udplite = IS_UDPLITE(sk);
Eric Dumazet197c9492015-12-30 08:51:12 -05001702 bool checksum_valid = false;
David S. Millerdb8dac22008-03-06 16:22:02 -08001703
David S. Millerdb8dac22008-03-06 16:22:02 -08001704 if (flags & MSG_ERRQUEUE)
Hannes Frederic Sowa85fbaa72013-11-23 00:46:12 +01001705 return ip_recv_error(sk, msg, len, addr_len);
David S. Millerdb8dac22008-03-06 16:22:02 -08001706
1707try_again:
Matthew Dawsona0917e02017-08-18 15:04:54 -04001708 off = sk_peek_offset(sk, flags);
Paolo Abenifd69c392019-04-08 10:15:59 +02001709 skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
David S. Millerdb8dac22008-03-06 16:22:02 -08001710 if (!skb)
samanthakumar627d2d62016-04-05 12:41:16 -04001711 return err;
David S. Millerdb8dac22008-03-06 16:22:02 -08001712
Paolo Abenib65ac442017-06-12 11:23:43 +02001713 ulen = udp_skb_len(skb);
David S. Miller59c2cda2011-12-01 14:12:55 -05001714 copied = len;
samanthakumar627d2d62016-04-05 12:41:16 -04001715 if (copied > ulen - off)
1716 copied = ulen - off;
David S. Miller59c2cda2011-12-01 14:12:55 -05001717 else if (copied < ulen)
David S. Millerdb8dac22008-03-06 16:22:02 -08001718 msg->msg_flags |= MSG_TRUNC;
1719
1720 /*
1721 * If checksum is needed at all, try to do it while copying the
1722 * data. If the data is truncated, or if we only want a partial
1723 * coverage checksum (UDP-Lite), do it before the copy.
1724 */
1725
Eric Dumazetd21dbdf2016-11-18 17:18:03 -08001726 if (copied < ulen || peeking ||
1727 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
Paolo Abenib65ac442017-06-12 11:23:43 +02001728 checksum_valid = udp_skb_csum_unnecessary(skb) ||
1729 !__udp_lib_checksum_complete(skb);
Eric Dumazet197c9492015-12-30 08:51:12 -05001730 if (!checksum_valid)
David S. Millerdb8dac22008-03-06 16:22:02 -08001731 goto csum_copy_err;
1732 }
1733
Paolo Abenib65ac442017-06-12 11:23:43 +02001734 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
1735 if (udp_skb_is_linear(skb))
1736 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
1737 else
1738 err = skb_copy_datagram_msg(skb, off, msg, copied);
1739 } else {
samanthakumar627d2d62016-04-05 12:41:16 -04001740 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
David S. Millerdb8dac22008-03-06 16:22:02 -08001741
1742 if (err == -EINVAL)
1743 goto csum_copy_err;
1744 }
1745
Eric Dumazet22911fc2012-06-27 00:23:44 +00001746 if (unlikely(err)) {
Paolo Abenifd69c392019-04-08 10:15:59 +02001747 if (!peeking) {
Eric Dumazet979402b2012-09-05 23:34:44 +00001748 atomic_inc(&sk->sk_drops);
Eric Dumazet6aef70a2016-04-27 16:44:27 -07001749 UDP_INC_STATS(sock_net(sk),
1750 UDP_MIB_INERRORS, is_udplite);
Eric Dumazet979402b2012-09-05 23:34:44 +00001751 }
Paolo Abeni850cbad2016-10-21 13:55:47 +02001752 kfree_skb(skb);
samanthakumar627d2d62016-04-05 12:41:16 -04001753 return err;
Eric Dumazet22911fc2012-06-27 00:23:44 +00001754 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001755
Paolo Abenifd69c392019-04-08 10:15:59 +02001756 if (!peeking)
Eric Dumazet6aef70a2016-04-27 16:44:27 -07001757 UDP_INC_STATS(sock_net(sk),
1758 UDP_MIB_INDATAGRAMS, is_udplite);
David S. Millerdb8dac22008-03-06 16:22:02 -08001759
Neil Horman3b885782009-10-12 13:26:31 -07001760 sock_recv_ts_and_drops(msg, sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001761
1762 /* Copy the address. */
Eric Dumazetc482c562009-07-17 00:26:32 +00001763 if (sin) {
David S. Millerdb8dac22008-03-06 16:22:02 -08001764 sin->sin_family = AF_INET;
1765 sin->sin_port = udp_hdr(skb)->source;
1766 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
1767 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
Hannes Frederic Sowabceaa902013-11-18 04:20:45 +01001768 *addr_len = sizeof(*sin);
Daniel Borkmann983695f2019-06-07 01:48:57 +02001769
1770 if (cgroup_bpf_enabled)
1771 BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
1772 (struct sockaddr *)sin);
David S. Millerdb8dac22008-03-06 16:22:02 -08001773 }
Paolo Abenibcd16652018-11-07 12:38:30 +01001774
1775 if (udp_sk(sk)->gro_enabled)
1776 udp_cmsg_recv(msg, sk, skb);
1777
David S. Millerdb8dac22008-03-06 16:22:02 -08001778 if (inet->cmsg_flags)
Paolo Abeniad959032016-11-04 11:28:58 +01001779 ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
David S. Millerdb8dac22008-03-06 16:22:02 -08001780
David S. Miller59c2cda2011-12-01 14:12:55 -05001781 err = copied;
David S. Millerdb8dac22008-03-06 16:22:02 -08001782 if (flags & MSG_TRUNC)
1783 err = ulen;
1784
Paolo Abeni850cbad2016-10-21 13:55:47 +02001785 skb_consume_udp(sk, skb, peeking ? -err : err);
David S. Millerdb8dac22008-03-06 16:22:02 -08001786 return err;
1787
1788csum_copy_err:
Paolo Abeni2276f582017-05-16 11:20:14 +02001789 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
1790 udp_skb_destructor)) {
Eric Dumazet6aef70a2016-04-27 16:44:27 -07001791 UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
1792 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001793 }
Paolo Abeni850cbad2016-10-21 13:55:47 +02001794 kfree_skb(skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001795
Eric Dumazetbeb39db2015-05-30 09:16:53 -07001796 /* starting over for a new packet, but check if we need to yield */
1797 cond_resched();
Xufeng Zhang9cfaa8d2011-06-21 10:43:40 +00001798 msg->msg_flags &= ~MSG_TRUNC;
David S. Millerdb8dac22008-03-06 16:22:02 -08001799 goto try_again;
1800}
1801
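/* Illustrative userspace sketch (not kernel code): the MSG_TRUNC
 * handling above lets an application learn the full datagram length
 * even when its buffer is too small, and MSG_PEEK leaves the datagram
 * queued:
 */
#if 0	/* example only */
#include <sys/socket.h>

static ssize_t next_datagram_len(int fd, char *buf, size_t cap)
{
	/* with MSG_TRUNC the return value is the real datagram length,
	 * not the (possibly truncated) number of bytes copied
	 */
	return recv(fd, buf, cap, MSG_PEEK | MSG_TRUNC);
}
#endif
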
Andrey Ignatovd74bad42018-03-30 15:08:05 -07001802int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1803{
1804 /* This check is replicated from __ip4_datagram_connect() and
 1805	 * intended to prevent the BPF program called below from accessing bytes
1806 * that are out of the bound specified by user in addr_len.
1807 */
1808 if (addr_len < sizeof(struct sockaddr_in))
1809 return -EINVAL;
1810
1811 return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
1812}
1813EXPORT_SYMBOL(udp_pre_connect);
1814
Eric Dumazet286c72d2016-10-20 09:39:40 -07001815int __udp_disconnect(struct sock *sk, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816{
1817 struct inet_sock *inet = inet_sk(sk);
1818 /*
1819 * 1003.1g - break association.
1820 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001821
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 sk->sk_state = TCP_CLOSE;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001823 inet->inet_daddr = 0;
1824 inet->inet_dport = 0;
Tom Herbertbdeab992011-08-14 19:45:55 +00001825 sock_rps_reset_rxhash(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 sk->sk_bound_dev_if = 0;
1827 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1828 inet_reset_saddr(sk);
1829
1830 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
1831 sk->sk_prot->unhash(sk);
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001832 inet->inet_sport = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 }
1834 sk_dst_reset(sk);
1835 return 0;
1836}
Eric Dumazet286c72d2016-10-20 09:39:40 -07001837EXPORT_SYMBOL(__udp_disconnect);
1838
1839int udp_disconnect(struct sock *sk, int flags)
1840{
1841 lock_sock(sk);
1842 __udp_disconnect(sk, flags);
1843 release_sock(sk);
1844 return 0;
1845}
Eric Dumazetc482c562009-07-17 00:26:32 +00001846EXPORT_SYMBOL(udp_disconnect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847
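/* Illustrative userspace sketch (not kernel code): __udp_disconnect()
 * above is reached when an application dissolves a UDP association by
 * connect()ing to an address with family AF_UNSPEC:
 */
#if 0	/* example only */
#include <string.h>
#include <sys/socket.h>

static int udp_unconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif
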
Eric Dumazet645ca702008-10-29 01:41:45 -07001848void udp_lib_unhash(struct sock *sk)
1849{
Eric Dumazet723b4612008-11-25 13:55:15 -08001850 if (sk_hashed(sk)) {
1851 struct udp_table *udptable = sk->sk_prot->h.udp_table;
Eric Dumazet512615b2009-11-08 10:17:58 +00001852 struct udp_hslot *hslot, *hslot2;
1853
1854 hslot = udp_hashslot(udptable, sock_net(sk),
1855 udp_sk(sk)->udp_port_hash);
1856 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
Eric Dumazet645ca702008-10-29 01:41:45 -07001857
Eric Dumazet723b4612008-11-25 13:55:15 -08001858 spin_lock_bh(&hslot->lock);
Craig Galleke32ea7e2016-01-04 17:41:46 -05001859 if (rcu_access_pointer(sk->sk_reuseport_cb))
1860 reuseport_detach_sock(sk);
Eric Dumazetca065d02016-04-01 08:52:13 -07001861 if (sk_del_node_init_rcu(sk)) {
Eric Dumazetfdcc8aa92009-11-08 10:17:05 +00001862 hslot->count--;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001863 inet_sk(sk)->inet_num = 0;
Eric Dumazet723b4612008-11-25 13:55:15 -08001864 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
Eric Dumazet512615b2009-11-08 10:17:58 +00001865
1866 spin_lock(&hslot2->lock);
Eric Dumazetca065d02016-04-01 08:52:13 -07001867 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
Eric Dumazet512615b2009-11-08 10:17:58 +00001868 hslot2->count--;
1869 spin_unlock(&hslot2->lock);
Eric Dumazet723b4612008-11-25 13:55:15 -08001870 }
1871 spin_unlock_bh(&hslot->lock);
Eric Dumazet645ca702008-10-29 01:41:45 -07001872 }
Eric Dumazet645ca702008-10-29 01:41:45 -07001873}
1874EXPORT_SYMBOL(udp_lib_unhash);
1875
Eric Dumazet719f8352010-09-08 05:08:44 +00001876/*
 1877 * inet_rcv_saddr was changed, so we must rehash the secondary hash
1878 */
1879void udp_lib_rehash(struct sock *sk, u16 newhash)
1880{
1881 if (sk_hashed(sk)) {
1882 struct udp_table *udptable = sk->sk_prot->h.udp_table;
1883 struct udp_hslot *hslot, *hslot2, *nhslot2;
1884
1885 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1886 nhslot2 = udp_hashslot2(udptable, newhash);
1887 udp_sk(sk)->udp_portaddr_hash = newhash;
Craig Galleke32ea7e2016-01-04 17:41:46 -05001888
1889 if (hslot2 != nhslot2 ||
1890 rcu_access_pointer(sk->sk_reuseport_cb)) {
Eric Dumazet719f8352010-09-08 05:08:44 +00001891 hslot = udp_hashslot(udptable, sock_net(sk),
1892 udp_sk(sk)->udp_port_hash);
1893 /* we must lock primary chain too */
1894 spin_lock_bh(&hslot->lock);
Craig Galleke32ea7e2016-01-04 17:41:46 -05001895 if (rcu_access_pointer(sk->sk_reuseport_cb))
1896 reuseport_detach_sock(sk);
Eric Dumazet719f8352010-09-08 05:08:44 +00001897
Craig Galleke32ea7e2016-01-04 17:41:46 -05001898 if (hslot2 != nhslot2) {
1899 spin_lock(&hslot2->lock);
Eric Dumazetca065d02016-04-01 08:52:13 -07001900 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
Craig Galleke32ea7e2016-01-04 17:41:46 -05001901 hslot2->count--;
1902 spin_unlock(&hslot2->lock);
Eric Dumazet719f8352010-09-08 05:08:44 +00001903
Craig Galleke32ea7e2016-01-04 17:41:46 -05001904 spin_lock(&nhslot2->lock);
Eric Dumazetca065d02016-04-01 08:52:13 -07001905 hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
Craig Galleke32ea7e2016-01-04 17:41:46 -05001906 &nhslot2->head);
1907 nhslot2->count++;
1908 spin_unlock(&nhslot2->lock);
1909 }
Eric Dumazet719f8352010-09-08 05:08:44 +00001910
1911 spin_unlock_bh(&hslot->lock);
1912 }
1913 }
1914}
1915EXPORT_SYMBOL(udp_lib_rehash);
1916
Alexey Kodanev8f6b5392019-01-16 19:17:44 +03001917void udp_v4_rehash(struct sock *sk)
Eric Dumazet719f8352010-09-08 05:08:44 +00001918{
Martin KaFai Lauf0b1e642017-12-01 12:52:30 -08001919 u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
Eric Dumazet719f8352010-09-08 05:08:44 +00001920 inet_sk(sk)->inet_rcv_saddr,
1921 inet_sk(sk)->inet_num);
1922 udp_lib_rehash(sk, new_hash);
1923}
1924
Paolo Abenia3f96c42017-05-17 14:52:16 +02001925static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
Herbert Xu93821772008-09-15 11:48:46 -07001926{
Tom Herbertfec5e652010-04-16 16:01:27 -07001927 int rc;
Herbert Xu93821772008-09-15 11:48:46 -07001928
Shawn Bohrer005ec972013-10-07 11:01:38 -05001929 if (inet_sk(sk)->inet_daddr) {
Tom Herbertbdeab992011-08-14 19:45:55 +00001930 sock_rps_save_rxhash(sk, skb);
Shawn Bohrer005ec972013-10-07 11:01:38 -05001931 sk_mark_napi_id(sk, skb);
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001932 sk_incoming_cpu_update(sk);
Eric Dumazete68b6e52016-11-16 09:10:42 -08001933 } else {
1934 sk_mark_napi_id_once(sk, skb);
Shawn Bohrer005ec972013-10-07 11:01:38 -05001935 }
Tom Herbertfec5e652010-04-16 16:01:27 -07001936
Paolo Abeni850cbad2016-10-21 13:55:47 +02001937 rc = __udp_enqueue_schedule_skb(sk, skb);
Eric Dumazet766e90372009-10-14 20:40:11 -07001938 if (rc < 0) {
1939 int is_udplite = IS_UDPLITE(sk);
1940
Herbert Xu93821772008-09-15 11:48:46 -07001941 /* Note that an ENOMEM error is charged twice */
Eric Dumazet766e90372009-10-14 20:40:11 -07001942 if (rc == -ENOMEM)
Eric Dumazete61da9e2016-04-29 14:16:50 -07001943 UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
Eric Dumazet02c22342016-04-27 16:44:30 -07001944 is_udplite);
Eric Dumazete61da9e2016-04-29 14:16:50 -07001945 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
Eric Dumazet766e90372009-10-14 20:40:11 -07001946 kfree_skb(skb);
Satoru Moriya296f7ea2011-06-17 11:58:39 +00001947 trace_udp_fail_queue_rcv_skb(rc, sk);
Eric Dumazet766e90372009-10-14 20:40:11 -07001948 return -1;
Herbert Xu93821772008-09-15 11:48:46 -07001949 }
1950
1951 return 0;
Herbert Xu93821772008-09-15 11:48:46 -07001952}
1953
David S. Millerdb8dac22008-03-06 16:22:02 -08001954/* returns:
1955 * -1: error
1956 * 0: success
1957 * >0: "udp encap" protocol resubmission
1958 *
1959 * Note that in the success and error cases, the skb is assumed to
1960 * have either been requeued or freed.
1961 */
Paolo Abenicf329aa2018-11-07 12:38:33 +01001962static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
David S. Millerdb8dac22008-03-06 16:22:02 -08001963{
1964 struct udp_sock *up = udp_sk(sk);
David S. Millerdb8dac22008-03-06 16:22:02 -08001965 int is_udplite = IS_UDPLITE(sk);
1966
1967 /*
1968 * Charge it to the socket, dropping if the queue is full.
1969 */
1970 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1971 goto drop;
1972 nf_reset(skb);
1973
Davidlohr Bueso88ab3102018-05-08 09:07:03 -07001974 if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
Eric Dumazet0ad92ad2011-11-01 12:56:59 +00001975 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
1976
David S. Millerdb8dac22008-03-06 16:22:02 -08001977 /*
1978 * This is an encapsulation socket so pass the skb to
1979 * the socket's udp_encap_rcv() hook. Otherwise, just
1980 * fall through and pass this up the UDP socket.
1981 * up->encap_rcv() returns the following value:
1982 * =0 if skb was successfully passed to the encap
1983 * handler or was discarded by it.
1984 * >0 if skb should be passed on to UDP.
1985 * <0 if skb should be resubmitted as proto -N
1986 */
1987
1988 /* if we're overly short, let UDP handle it */
Mark Rutland6aa7de02017-10-23 14:07:29 -07001989 encap_rcv = READ_ONCE(up->encap_rcv);
Hannes Frederic Sowae5aed002016-05-19 15:58:33 +02001990 if (encap_rcv) {
David S. Millerdb8dac22008-03-06 16:22:02 -08001991 int ret;
1992
Tom Herbert0a809662014-05-07 16:52:39 -07001993 /* Verify checksum before giving to encap */
1994 if (udp_lib_checksum_complete(skb))
1995 goto csum_error;
1996
Eric Dumazet0ad92ad2011-11-01 12:56:59 +00001997 ret = encap_rcv(sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001998 if (ret <= 0) {
Eric Dumazet02c22342016-04-27 16:44:30 -07001999 __UDP_INC_STATS(sock_net(sk),
2000 UDP_MIB_INDATAGRAMS,
2001 is_udplite);
David S. Millerdb8dac22008-03-06 16:22:02 -08002002 return -ret;
2003 }
2004 }
2005
 2006	/* FALLTHROUGH -- it's a UDP packet */
2007 }
2008
2009 /*
2010 * UDP-Lite specific tests, ignored on UDP sockets
2011 */
2012 if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
2013
2014 /*
2015 * MIB statistics other than incrementing the error count are
2016 * disabled for the following two types of errors: these depend
2017 * on the application settings, not on the functioning of the
2018 * protocol stack as such.
2019 *
2020 * RFC 3828 here recommends (sec 3.3): "There should also be a
2021 * way ... to ... at least let the receiving application block
2022 * delivery of packets with coverage values less than a value
2023 * provided by the application."
2024 */
2025 if (up->pcrlen == 0) { /* full coverage was set */
Joe Perchesba7a46f2014-11-11 10:59:17 -08002026 net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
2027 UDP_SKB_CB(skb)->cscov, skb->len);
David S. Millerdb8dac22008-03-06 16:22:02 -08002028 goto drop;
2029 }
2030 /* The next case involves violating the min. coverage requested
 2031		 * by the receiver. This is subtle: if the receiver wants x and x is
 2032		 * greater than the buffer size/MTU, then the receiver will complain
 2033		 * that it wants x while the sender emits packets of smaller size y.
2034 * Therefore the above ...()->partial_cov statement is essential.
2035 */
2036 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
Joe Perchesba7a46f2014-11-11 10:59:17 -08002037 net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
2038 UDP_SKB_CB(skb)->cscov, up->pcrlen);
David S. Millerdb8dac22008-03-06 16:22:02 -08002039 goto drop;
2040 }
2041 }
2042
Paolo Abenidd99e422017-06-21 10:24:40 +02002043 prefetch(&sk->sk_rmem_alloc);
Eric Dumazetce25d662016-06-02 14:52:43 -07002044 if (rcu_access_pointer(sk->sk_filter) &&
2045 udp_lib_checksum_complete(skb))
samanthakumare6afc8a2016-04-05 12:41:15 -04002046 goto csum_error;
Eric Dumazetce25d662016-06-02 14:52:43 -07002047
Daniel Borkmannba66bbe2016-07-25 18:06:12 +02002048 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
Michal Kubečeka6127692016-07-08 17:52:33 +02002049 goto drop;
David S. Millerdb8dac22008-03-06 16:22:02 -08002050
samanthakumare6afc8a2016-04-05 12:41:15 -04002051 udp_csum_pull_header(skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08002052
Shawn Bohrerfbf88662013-10-07 11:01:40 -05002053 ipv4_pktinfo_prepare(sk, skb);
Paolo Abeni850cbad2016-10-21 13:55:47 +02002054 return __udp_queue_rcv_skb(sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08002055
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00002056csum_error:
Eric Dumazet02c22342016-04-27 16:44:30 -07002057 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
David S. Millerdb8dac22008-03-06 16:22:02 -08002058drop:
Eric Dumazet02c22342016-04-27 16:44:30 -07002059 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
Eric Dumazet8edf19c2009-10-15 00:12:40 +00002060 atomic_inc(&sk->sk_drops);
David S. Millerdb8dac22008-03-06 16:22:02 -08002061 kfree_skb(skb);
2062 return -1;
2063}
2064
Paolo Abenicf329aa2018-11-07 12:38:33 +01002065static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2066{
2067 struct sk_buff *next, *segs;
2068 int ret;
2069
2070 if (likely(!udp_unexpected_gso(sk, skb)))
2071 return udp_queue_rcv_one_skb(sk, skb);
2072
2073 BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
2074 __skb_push(skb, -skb_mac_offset(skb));
2075 segs = udp_rcv_segment(sk, skb, true);
2076 for (skb = segs; skb; skb = next) {
2077 next = skb->next;
2078 __skb_pull(skb, skb_transport_offset(skb));
2079 ret = udp_queue_rcv_one_skb(sk, skb);
2080 if (ret > 0)
2081 ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
2082 }
2083 return 0;
2084}
2085
Eric Dumazet97502232013-12-11 14:46:51 -08002086/* For TCP sockets, sk_rx_dst is protected by the socket lock.
Eric Dumazete47eb5d2013-12-15 10:53:46 -08002087 * For UDP, we use xchg() to guard against concurrent changes.
Eric Dumazet97502232013-12-11 14:46:51 -08002088 */
Paolo Abeni64f0f5d2017-08-25 14:31:01 +02002089bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
Shawn Bohrer421b3882013-10-07 11:01:39 -05002090{
Eric Dumazet97502232013-12-11 14:46:51 -08002091 struct dst_entry *old;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002092
Wei Wangd24406c2017-06-17 10:42:25 -07002093 if (dst_hold_safe(dst)) {
2094 old = xchg(&sk->sk_rx_dst, dst);
2095 dst_release(old);
Paolo Abeni64f0f5d2017-08-25 14:31:01 +02002096 return old != dst;
Wei Wangd24406c2017-06-17 10:42:25 -07002097 }
Paolo Abeni64f0f5d2017-08-25 14:31:01 +02002098 return false;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002099}
Paolo Abenic9f2c1a2017-07-27 14:45:09 +02002100EXPORT_SYMBOL(udp_sk_rx_dst_set);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002101
David S. Millerdb8dac22008-03-06 16:22:02 -08002102/*
2103 * Multicasts and broadcasts go to each listener.
2104 *
Eric Dumazet1240d132009-11-08 10:18:44 +00002105 * Note: called only from the BH handler context.
David S. Millerdb8dac22008-03-06 16:22:02 -08002106 */
Pavel Emelyanove3163492008-06-16 17:12:11 -07002107static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
David S. Millerdb8dac22008-03-06 16:22:02 -08002108 struct udphdr *uh,
2109 __be32 saddr, __be32 daddr,
Rick Jones36cbb242014-11-06 10:37:54 -08002110 struct udp_table *udptable,
2111 int proto)
David S. Millerdb8dac22008-03-06 16:22:02 -08002112{
Eric Dumazetca065d02016-04-01 08:52:13 -07002113 struct sock *sk, *first = NULL;
David Held5cf3d462014-07-15 23:28:31 -04002114 unsigned short hnum = ntohs(uh->dest);
2115 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
David Held2dc41cf2014-07-15 23:28:32 -04002116 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
Eric Dumazetca065d02016-04-01 08:52:13 -07002117 unsigned int offset = offsetof(typeof(*sk), sk_node);
2118 int dif = skb->dev->ifindex;
David Ahernfb74c272017-08-07 08:44:16 -07002119 int sdif = inet_sdif(skb);
Eric Dumazetca065d02016-04-01 08:52:13 -07002120 struct hlist_node *node;
2121 struct sk_buff *nskb;
David Held2dc41cf2014-07-15 23:28:32 -04002122
2123 if (use_hash2) {
Martin KaFai Lauf0b1e642017-12-01 12:52:30 -08002124 hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
Pablo Neira73e2d5e2016-11-14 23:40:30 +01002125 udptable->mask;
Martin KaFai Lauf0b1e642017-12-01 12:52:30 -08002126 hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
David Held2dc41cf2014-07-15 23:28:32 -04002127start_lookup:
Pablo Neira73e2d5e2016-11-14 23:40:30 +01002128 hslot = &udptable->hash2[hash2];
David Held2dc41cf2014-07-15 23:28:32 -04002129 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
2130 }
David S. Millerdb8dac22008-03-06 16:22:02 -08002131
Eric Dumazetca065d02016-04-01 08:52:13 -07002132 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
2133 if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
David Ahernfb74c272017-08-07 08:44:16 -07002134 uh->source, saddr, dif, sdif, hnum))
Eric Dumazetca065d02016-04-01 08:52:13 -07002135 continue;
David S. Millerdb8dac22008-03-06 16:22:02 -08002136
Eric Dumazetca065d02016-04-01 08:52:13 -07002137 if (!first) {
2138 first = sk;
2139 continue;
2140 }
2141 nskb = skb_clone(skb, GFP_ATOMIC);
2142
2143 if (unlikely(!nskb)) {
2144 atomic_inc(&sk->sk_drops);
Eric Dumazet02c22342016-04-27 16:44:30 -07002145 __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
2146 IS_UDPLITE(sk));
2147 __UDP_INC_STATS(net, UDP_MIB_INERRORS,
2148 IS_UDPLITE(sk));
Eric Dumazetca065d02016-04-01 08:52:13 -07002149 continue;
2150 }
2151 if (udp_queue_rcv_skb(sk, nskb) > 0)
2152 consume_skb(nskb);
2153 }
Eric Dumazet1240d132009-11-08 10:18:44 +00002154
David Held2dc41cf2014-07-15 23:28:32 -04002155 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
2156 if (use_hash2 && hash2 != hash2_any) {
2157 hash2 = hash2_any;
2158 goto start_lookup;
2159 }
2160
Eric Dumazetca065d02016-04-01 08:52:13 -07002161 if (first) {
2162 if (udp_queue_rcv_skb(first, skb) > 0)
2163 consume_skb(skb);
Eric Dumazet1240d132009-11-08 10:18:44 +00002164 } else {
Eric Dumazetca065d02016-04-01 08:52:13 -07002165 kfree_skb(skb);
Eric Dumazet02c22342016-04-27 16:44:30 -07002166 __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
2167 proto == IPPROTO_UDPLITE);
Eric Dumazet1240d132009-11-08 10:18:44 +00002168 }
David S. Millerdb8dac22008-03-06 16:22:02 -08002169 return 0;
2170}
2171
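/* Note on the hash2 path above (illustrative): once the primary,
 * port-only slot holds more than 10 sockets, the walk switches to the
 * secondary table hashed on (address, port) and runs twice -- once for
 * the datagram's destination address and once for INADDR_ANY -- so both
 * specifically-bound and wildcard-bound listeners are still matched.
 */
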
 2172/* Initialize the UDP checksum. If this exits with zero (success),
 2173 * CHECKSUM_UNNECESSARY means that no more checks are required.
Su Yanjun666a3d62019-07-18 10:19:23 +08002174 * Otherwise, csum completion requires checksumming the packet body,
David S. Millerdb8dac22008-03-06 16:22:02 -08002175 * including the udp header, and folding it into skb->csum.
2176 */
2177static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2178 int proto)
2179{
David S. Millerdb8dac22008-03-06 16:22:02 -08002180 int err;
2181
2182 UDP_SKB_CB(skb)->partial_cov = 0;
2183 UDP_SKB_CB(skb)->cscov = skb->len;
2184
2185 if (proto == IPPROTO_UDPLITE) {
2186 err = udplite_checksum_init(skb, uh);
2187 if (err)
2188 return err;
Alexey Kodanev15f35d42018-02-15 20:18:43 +03002189
2190 if (UDP_SKB_CB(skb)->partial_cov) {
2191 skb->csum = inet_compute_pseudo(skb, proto);
2192 return 0;
2193 }
David S. Millerdb8dac22008-03-06 16:22:02 -08002194 }
2195
Hannes Frederic Sowab46d9f62016-06-12 12:02:46 +02002196 /* Note, we are only interested in != 0 or == 0, thus the
2197 * force to int.
2198 */
Sean Tranchettidb4f1be2018-10-23 16:04:31 -06002199 err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2200 inet_compute_pseudo);
2201 if (err)
2202 return err;
2203
2204 if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
2205 /* If SW calculated the value, we know it's bad */
2206 if (skb->csum_complete_sw)
2207 return 1;
2208
2209 /* HW says the value is bad. Let's validate that.
2210 * skb->csum is no longer the full packet checksum,
2211 * so don't treat it as such.
2212 */
2213 skb_checksum_complete_unset(skb);
2214 }
2215
2216 return 0;
David S. Millerdb8dac22008-03-06 16:22:02 -08002217}
2218
Paolo Abeni2b5a9212018-09-13 16:27:20 +02002219/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
2220 * return code conversion for ip layer consumption
2221 */
2222static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2223 struct udphdr *uh)
2224{
2225 int ret;
2226
2227 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
Li RongQinge4aa33a2019-07-04 17:03:26 +08002228 skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
Paolo Abeni2b5a9212018-09-13 16:27:20 +02002229
2230 ret = udp_queue_rcv_skb(sk, skb);
2231
2232 /* a return value > 0 means to resubmit the input, but
2233 * it wants the return to be -protocol, or 0
2234 */
2235 if (ret > 0)
2236 return -ret;
2237 return 0;
2238}
2239
David S. Millerdb8dac22008-03-06 16:22:02 -08002240/*
2241 * All we need to do is get the socket, and then do a checksum.
2242 */
2243
Eric Dumazet645ca702008-10-29 01:41:45 -07002244int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
David S. Millerdb8dac22008-03-06 16:22:02 -08002245 int proto)
2246{
2247 struct sock *sk;
Jesper Dangaard Brouer7b5e56f2009-02-05 15:05:45 -08002248 struct udphdr *uh;
David S. Millerdb8dac22008-03-06 16:22:02 -08002249 unsigned short ulen;
Eric Dumazetadf30902009-06-02 05:19:30 +00002250 struct rtable *rt = skb_rtable(skb);
Jesper Dangaard Brouer2783ef22009-02-06 01:59:12 -08002251 __be32 saddr, daddr;
Pavel Emelyanov02833282008-07-05 21:18:48 -07002252 struct net *net = dev_net(skb->dev);
David S. Millerdb8dac22008-03-06 16:22:02 -08002253
2254 /*
2255 * Validate the packet.
2256 */
2257 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2258 goto drop; /* No space for header. */
2259
Jesper Dangaard Brouer7b5e56f2009-02-05 15:05:45 -08002260 uh = udp_hdr(skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08002261 ulen = ntohs(uh->len);
Bjørn Morkccc2d972010-05-06 03:44:34 +00002262 saddr = ip_hdr(skb)->saddr;
2263 daddr = ip_hdr(skb)->daddr;
2264
David S. Millerdb8dac22008-03-06 16:22:02 -08002265 if (ulen > skb->len)
2266 goto short_packet;
2267
2268 if (proto == IPPROTO_UDP) {
2269 /* UDP validates ulen. */
2270 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
2271 goto short_packet;
2272 uh = udp_hdr(skb);
2273 }
2274
2275 if (udp4_csum_init(skb, uh, proto))
2276 goto csum_error;
2277
Eric Dumazet8afdd992013-12-10 18:07:23 -08002278 sk = skb_steal_sock(skb);
2279 if (sk) {
Eric Dumazet97502232013-12-11 14:46:51 -08002280 struct dst_entry *dst = skb_dst(skb);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002281 int ret;
David S. Millerdb8dac22008-03-06 16:22:02 -08002282
Eric Dumazet97502232013-12-11 14:46:51 -08002283 if (unlikely(sk->sk_rx_dst != dst))
2284 udp_sk_rx_dst_set(sk, dst);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002285
Paolo Abeni2b5a9212018-09-13 16:27:20 +02002286 ret = udp_unicast_rcv_skb(sk, skb, uh);
Eric Dumazet8afdd992013-12-10 18:07:23 -08002287 sock_put(sk);
Paolo Abeni2b5a9212018-09-13 16:27:20 +02002288 return ret;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002289 }
David S. Millerdb8dac22008-03-06 16:22:02 -08002290
Fabian Frederickc18450a2014-11-04 20:48:41 +01002291 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
2292 return __udp4_lib_mcast_deliver(net, skb, uh,
Rick Jones36cbb242014-11-06 10:37:54 -08002293 saddr, daddr, udptable, proto);
Fabian Frederickc18450a2014-11-04 20:48:41 +01002294
2295 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
Paolo Abeni2b5a9212018-09-13 16:27:20 +02002296 if (sk)
2297 return udp_unicast_rcv_skb(sk, skb, uh);
David S. Millerdb8dac22008-03-06 16:22:02 -08002298
2299 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2300 goto drop;
2301 nf_reset(skb);
2302
2303 /* No socket. Drop packet silently, if checksum is wrong */
2304 if (udp_lib_checksum_complete(skb))
2305 goto csum_error;
2306
Eric Dumazet02c22342016-04-27 16:44:30 -07002307 __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
David S. Millerdb8dac22008-03-06 16:22:02 -08002308 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
2309
2310 /*
 2311	 * Hmm. We got a UDP packet to a port to which we
2312 * don't wanna listen. Ignore it.
2313 */
2314 kfree_skb(skb);
2315 return 0;
2316
2317short_packet:
Joe Perchesba7a46f2014-11-11 10:59:17 -08002318 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
2319 proto == IPPROTO_UDPLITE ? "Lite" : "",
2320 &saddr, ntohs(uh->source),
2321 ulen, skb->len,
2322 &daddr, ntohs(uh->dest));
David S. Millerdb8dac22008-03-06 16:22:02 -08002323 goto drop;
2324
2325csum_error:
2326 /*
2327 * RFC1122: OK. Discards the bad packet silently (as far as
2328 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
2329 */
Joe Perchesba7a46f2014-11-11 10:59:17 -08002330 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
2331 proto == IPPROTO_UDPLITE ? "Lite" : "",
2332 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
2333 ulen);
Eric Dumazet02c22342016-04-27 16:44:30 -07002334 __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
David S. Millerdb8dac22008-03-06 16:22:02 -08002335drop:
Eric Dumazet02c22342016-04-27 16:44:30 -07002336 __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
David S. Millerdb8dac22008-03-06 16:22:02 -08002337 kfree_skb(skb);
2338 return 0;
2339}
2340
Shawn Bohrer421b3882013-10-07 11:01:39 -05002341/* We can only early demux multicast if there is a single matching socket.
 2342 * If more than one socket is found, return NULL.
2343 */
2344static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
2345 __be16 loc_port, __be32 loc_addr,
2346 __be16 rmt_port, __be32 rmt_addr,
David Ahernfb74c272017-08-07 08:44:16 -07002347 int dif, int sdif)
Shawn Bohrer421b3882013-10-07 11:01:39 -05002348{
2349 struct sock *sk, *result;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002350 unsigned short hnum = ntohs(loc_port);
Eric Dumazetca065d02016-04-01 08:52:13 -07002351 unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002352 struct udp_hslot *hslot = &udp_table.hash[slot];
2353
Eric Dumazet63c6f812014-06-12 16:13:06 -07002354	/* Do not bother scanning too big a list */
2355 if (hslot->count > 10)
2356 return NULL;
2357
Shawn Bohrer421b3882013-10-07 11:01:39 -05002358 result = NULL;
Eric Dumazetca065d02016-04-01 08:52:13 -07002359 sk_for_each_rcu(sk, &hslot->head) {
2360 if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
David Ahernfb74c272017-08-07 08:44:16 -07002361 rmt_port, rmt_addr, dif, sdif, hnum)) {
Eric Dumazetca065d02016-04-01 08:52:13 -07002362 if (result)
2363 return NULL;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002364 result = sk;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002365 }
2366 }
Shawn Bohrer421b3882013-10-07 11:01:39 -05002367
Shawn Bohrer421b3882013-10-07 11:01:39 -05002368 return result;
2369}
2370
2371/* For unicast we should only early demux connected sockets or we can
2372 * break forwarding setups. The chains here can be long so only check
 2373 * if the first socket is an exact match, and if not, move on.
2374 */
2375static struct sock *__udp4_lib_demux_lookup(struct net *net,
2376 __be16 loc_port, __be32 loc_addr,
2377 __be16 rmt_port, __be32 rmt_addr,
David Ahern3fa6f612017-08-07 08:44:17 -07002378 int dif, int sdif)
Shawn Bohrer421b3882013-10-07 11:01:39 -05002379{
Shawn Bohrer421b3882013-10-07 11:01:39 -05002380 unsigned short hnum = ntohs(loc_port);
Martin KaFai Lauf0b1e642017-12-01 12:52:30 -08002381 unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002382 unsigned int slot2 = hash2 & udp_table.mask;
2383 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
Joe Perchesc7228312014-05-13 20:30:07 -07002384 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002385 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
Eric Dumazetca065d02016-04-01 08:52:13 -07002386 struct sock *sk;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002387
Eric Dumazetca065d02016-04-01 08:52:13 -07002388 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
2389 if (INET_MATCH(sk, net, acookie, rmt_addr,
David Ahern3fa6f612017-08-07 08:44:17 -07002390 loc_addr, ports, dif, sdif))
Eric Dumazetca065d02016-04-01 08:52:13 -07002391 return sk;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002392 /* Only check first socket in chain */
2393 break;
2394 }
Eric Dumazetca065d02016-04-01 08:52:13 -07002395 return NULL;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002396}
2397
int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* Validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* Set noref for now; any place that wants to hold the dst
		 * has to call dst_hold_safe().
		 */
		skb_dst_set_noref(skb, dst);

		/* For unconnected multicast sockets we need to validate
		 * the source on each packet.
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}

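/* Userspace sketch (example only): udp_v4_early_demux() is gated by the
 * net.ipv4.udp_early_demux sysctl; a minimal runtime toggle, assuming
 * procfs is mounted at /proc:
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <stdio.h>

static int set_udp_early_demux(int on)
{
	FILE *f = fopen("/proc/sys/net/ipv4/udp_early_demux", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}
#endif
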
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udp_encap_needed_key);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			lock_sock(sk);
			udp_tunnel_encap_enable(sk->sk_socket);
			release_sock(sk);
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	case UDP_GRO:
		lock_sock(sk);
		if (valbool)
			udp_tunnel_encap_enable(sk->sk_socket);
		up->gro_enabled = valbool;
		release_sock(sk);
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this
	 * option. The case coverage > packet length is handled by the
	 * send module.
	 */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero
	 * is used, this again means full checksum coverage.
	 */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);

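/* Userspace sketch (example only): exercising the UDP_SEGMENT case
 * handled above to request UDP GSO. The fallback define matches
 * include/uapi/linux/udp.h; the segment size below is illustrative,
 * and a value of 0 disables segmentation again.
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT	103
#endif

static int enable_udp_gso(int fd)
{
	int gso_size = 1400;	/* payload bytes per segment, <= USHRT_MAX */

	return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}
#endif
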
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	case UDP_SEGMENT:
		val = up->gso_size;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP).
	 */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

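/* Userspace sketch (example only): driving the UDPLITE_SEND_CSCOV path
 * above from a UDP-Lite socket (RFC 3828). The fallback defines match
 * include/uapi/linux/udp.h; a coverage of 8 checksums only the header.
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10
#endif

static int udplite_header_only_coverage(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int cov = 8;	/* cover only the 8-byte UDP-Lite header */

	if (fd < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		       &cov, sizeof(cov)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif
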
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select() indicating data is available but then block when
 *	reading it. Add special case code to work around these arguably
 *	broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

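/* Userspace sketch (example only): the checksum special case above means
 * a datagram can disappear between poll() and the read that follows, so
 * robust callers pair poll() with a non-blocking receive:
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <poll.h>
#include <sys/socket.h>

static ssize_t poll_then_recv(int fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0)
		return -1;
	/* MSG_DONTWAIT: fail with EAGAIN instead of blocking if the
	 * queued packet was dropped for a bad checksum meanwhile.
	 */
	return recv(fd, buf, len, MSG_DONTWAIT);
}
#endif
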
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

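/* Userspace sketch (example only): udp4_seq_show() above is what backs
 * /proc/net/udp; a minimal reader that just echoes the table:
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <stdio.h>

static void dump_proc_net_udp(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/udp", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
#endif
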
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

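/* For example, booting with "uhash_entries=65536" on the kernel command
 * line requests a 65536-entry UDP hash table; nonzero values below
 * UDP_HTABLE_SIZE_MIN are rounded up by set_uhash_entries() above.
 */
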
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}