// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

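/* Scan the primary hash chain @hslot for sockets already bound to local port
 * @num that would conflict with @sk.  When @bitmap is given, don't return
 * early: mark every in-use port in the bitmap instead, so the caller can test
 * a whole chain's worth of candidate ports under a single lock.
 */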
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold the spinlock of the primary hash chain, so no other
 * writer can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

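/* Attach @sk to the SO_REUSEPORT group of a matching socket already hashed in
 * @hslot, or allocate a fresh reuseport group if no such socket exists yet.
 */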
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

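/* IPv4 flavour of port binding: precompute the INADDR_ANY secondary hash and
 * the address-only part of the port+address hash (the chosen port is XORed in
 * later by udp_lib_get_port()).
 */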
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

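/* Score @sk as a candidate receiver for a packet with the given 4-tuple.
 * -1 means the socket cannot match at all; otherwise every attribute the
 * socket has pinned down (remote address, remote port, bound device) adds to
 * the score, so the most specific socket wins the lookup.  An AF_INET socket
 * scores above a v4-mapped IPv6 one, and a socket whose incoming CPU matches
 * the current CPU gets a small bonus.
 */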
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score += 4;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;
	return score;
}

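/* Flow hash used to pick a socket within a SO_REUSEPORT group: the 4-tuple
 * hashed with a lazily-initialized random secret, mixed per netns.
 */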
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

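/* If @sk belongs to a SO_REUSEPORT group and is not connected, let the group
 * pick the destination socket for this flow; returns NULL when reuseport
 * does not apply.
 */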
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}

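/* Give a BPF sk_lookup program a chance to steer the packet to a socket of
 * its choosing; reuseport selection is still honoured on the socket the
 * program returns.
 */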
static struct sock *udp4_lookup_run_bpf(struct net *net,
					struct udp_table *udptable,
					struct sk_buff *skb,
					__be32 saddr, __be16 sport,
					__be32 daddr, u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* UDP nearly always wildcards out the wazoo, so it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard socket */
	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp4_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum, dif);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  htonl(INADDR_ANY), hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

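/* Convenience wrappers that pull the addresses and interface indices for the
 * lookup straight out of the skb's IP header.
 */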
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), &udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

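/* Does @sk qualify to receive a multicast/broadcast datagram with the given
 * local/remote addresses and ports, arriving on @dif/@sdif?
 */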
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

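/* Static key that keeps the encapsulation hooks out of the receive and error
 * paths until the first encap-enabled socket is created.
 */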
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{
	static_branch_dec(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_disable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints; in that case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
| 645 | static struct sock *__udp4_lib_err_encap(struct net *net, |
| 646 | const struct iphdr *iph, |
| 647 | struct udphdr *uh, |
| 648 | struct udp_table *udptable, |
Vadim Fedorenko | 9bfce73 | 2021-07-20 23:35:28 +0300 | [diff] [blame] | 649 | struct sock *sk, |
Stefano Brivio | e7cc082 | 2018-11-08 12:19:22 +0100 | [diff] [blame] | 650 | struct sk_buff *skb, u32 info) |
Stefano Brivio | a36e185 | 2018-11-08 12:19:14 +0100 | [diff] [blame] | 651 | { |
Vadim Fedorenko | 9bfce73 | 2021-07-20 23:35:28 +0300 | [diff] [blame] | 652 | int (*lookup)(struct sock *sk, struct sk_buff *skb); |
Stefano Brivio | a36e185 | 2018-11-08 12:19:14 +0100 | [diff] [blame] | 653 | int network_offset, transport_offset; |
Vadim Fedorenko | 9bfce73 | 2021-07-20 23:35:28 +0300 | [diff] [blame] | 654 | struct udp_sock *up; |
Stefano Brivio | a36e185 | 2018-11-08 12:19:14 +0100 | [diff] [blame] | 655 | |
Stefano Brivio | a36e185 | 2018-11-08 12:19:14 +0100 | [diff] [blame] | 656 | network_offset = skb_network_offset(skb); |
| 657 | transport_offset = skb_transport_offset(skb); |
| 658 | |
| 659 | /* Network header needs to point to the outer IPv4 header inside ICMP */ |
| 660 | skb_reset_network_header(skb); |
| 661 | |
| 662 | /* Transport header needs to point to the UDP header */ |
| 663 | skb_set_transport_header(skb, iph->ihl << 2); |
| 664 | |
Vadim Fedorenko | 9bfce73 | 2021-07-20 23:35:28 +0300 | [diff] [blame] | 665 | if (sk) { |
| 666 | up = udp_sk(sk); |
| 667 | |
| 668 | lookup = READ_ONCE(up->encap_err_lookup); |
| 669 | if (lookup && lookup(sk, skb)) |
| 670 | sk = NULL; |
| 671 | |
| 672 | goto out; |
| 673 | } |
| 674 | |
Stefano Brivio | e7cc082 | 2018-11-08 12:19:22 +0100 | [diff] [blame] | 675 | sk = __udp4_lib_lookup(net, iph->daddr, uh->source, |
| 676 | iph->saddr, uh->dest, skb->dev->ifindex, 0, |
| 677 | udptable, NULL); |
| 678 | if (sk) { |
Vadim Fedorenko | 9bfce73 | 2021-07-20 23:35:28 +0300 | [diff] [blame] | 679 | up = udp_sk(sk); |
Stefano Brivio | e7cc082 | 2018-11-08 12:19:22 +0100 | [diff] [blame] | 680 | |
| 681 | lookup = READ_ONCE(up->encap_err_lookup); |
| 682 | if (!lookup || lookup(sk, skb)) |
| 683 | sk = NULL; |
| 684 | } |
| 685 | |
Vadim Fedorenko | 9bfce73 | 2021-07-20 23:35:28 +0300 | [diff] [blame] | 686 | out: |
Stefano Brivio | e7cc082 | 2018-11-08 12:19:22 +0100 | [diff] [blame] | 687 | if (!sk) |
| 688 | sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info)); |
Stefano Brivio | a36e185 | 2018-11-08 12:19:14 +0100 | [diff] [blame] | 689 | |
| 690 | skb_set_transport_header(skb, transport_offset); |
| 691 | skb_set_network_header(skb, network_offset); |
| 692 | |
| 693 | return sk; |
| 694 | } |
| 695 | |
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
						  info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb:	sk_buff containing the filled-in UDP header
 * 		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 844 | /* |
| 845 | * HW-checksum won't work as there are two or more |
| 846 | * fragments on the socket so that all csums of sk_buffs |
| 847 | * should be together |
| 848 | */ |
WANG Cong | ebbe495 | 2014-06-02 16:12:02 -0700 | [diff] [blame] | 849 | skb_walk_frags(skb, frags) { |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 850 | csum = csum_add(csum, frags->csum); |
| 851 | hlen -= frags->len; |
WANG Cong | ebbe495 | 2014-06-02 16:12:02 -0700 | [diff] [blame] | 852 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 853 | |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 854 | csum = skb_checksum(skb, offset, hlen, csum); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 855 | skb->ip_summed = CHECKSUM_NONE; |
| 856 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 857 | uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); |
| 858 | if (uh->check == 0) |
| 859 | uh->check = CSUM_MANGLED_0; |
| 860 | } |
| 861 | } |
Thomas Graf | c26bf4a | 2013-07-25 18:12:18 +0200 | [diff] [blame] | 862 | EXPORT_SYMBOL_GPL(udp4_hwcsum); |
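/*
 * Worked example (illustrative, not kernel code): the RFC 768 checksum
 * over the IPv4 pseudo-header computed in plain userspace C. The helper
 * names (fold32, udp_pseudo_csum) are hypothetical; they only mirror
 * what csum_tcpudp_magic() and csum_fold() do arithmetically. Addresses
 * are assumed to be in host byte order here.
 */
#include <stddef.h>
#include <stdint.h>

#define EX_IPPROTO_UDP 17

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t udp_pseudo_csum(uint32_t saddr, uint32_t daddr,
				const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;
	uint16_t csum;
	size_t i;

	/* pseudo-header: source, destination, protocol, UDP length */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += EX_IPPROTO_UDP + (uint32_t)len;
	/* UDP header (check field zeroed) plus payload, as big-endian words */
	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(udp[i] << 8 | udp[i + 1]);
	if (len & 1)
		sum += (uint32_t)udp[len - 1] << 8;
	csum = ~fold32(sum);
	return csum ? csum : 0xffff;	/* 0 means "no checksum": mangle to all-ones */
}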
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 863 | |
Tom Herbert | af5fcba | 2014-06-04 17:19:48 -0700 | [diff] [blame] | 864 | /* Set the UDP checksum for an IPv4 UDP packet. This is intended
| 865 | * for simple cases such as setting the checksum for a UDP tunnel.
| 866 | */ |
| 867 | void udp_set_csum(bool nocheck, struct sk_buff *skb, |
| 868 | __be32 saddr, __be32 daddr, int len) |
| 869 | { |
| 870 | struct udphdr *uh = udp_hdr(skb); |
| 871 | |
Edward Cree | 179bc67 | 2016-02-11 20:48:04 +0000 | [diff] [blame] | 872 | if (nocheck) { |
Tom Herbert | af5fcba | 2014-06-04 17:19:48 -0700 | [diff] [blame] | 873 | uh->check = 0; |
Edward Cree | 179bc67 | 2016-02-11 20:48:04 +0000 | [diff] [blame] | 874 | } else if (skb_is_gso(skb)) { |
Tom Herbert | af5fcba | 2014-06-04 17:19:48 -0700 | [diff] [blame] | 875 | uh->check = ~udp_v4_check(len, saddr, daddr, 0); |
Edward Cree | 179bc67 | 2016-02-11 20:48:04 +0000 | [diff] [blame] | 876 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 877 | uh->check = 0; |
| 878 | uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb)); |
| 879 | if (uh->check == 0) |
| 880 | uh->check = CSUM_MANGLED_0; |
Edward Cree | d75f130 | 2016-02-11 20:49:40 +0000 | [diff] [blame] | 881 | } else { |
Tom Herbert | af5fcba | 2014-06-04 17:19:48 -0700 | [diff] [blame] | 882 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 883 | skb->csum_start = skb_transport_header(skb) - skb->head; |
| 884 | skb->csum_offset = offsetof(struct udphdr, check); |
| 885 | uh->check = ~udp_v4_check(len, saddr, daddr, 0); |
Tom Herbert | af5fcba | 2014-06-04 17:19:48 -0700 | [diff] [blame] | 886 | } |
| 887 | } |
| 888 | EXPORT_SYMBOL(udp_set_csum); |
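/*
 * Minimal caller sketch (illustrative): a tunnel transmit helper in the
 * style of the UDP tunnel code that uses udp_set_csum(). The function
 * name and its arguments are hypothetical; only udp_set_csum() itself is
 * the real API. 'nocheck' selects the RFC 768 zero-checksum transmit mode.
 */
static void example_tunnel_csum(struct sk_buff *skb, __be32 saddr,
				__be32 daddr, bool nocheck)
{
	/* skb->len must cover the UDP header plus the encapsulated payload */
	udp_set_csum(nocheck, skb, saddr, daddr, skb->len);
}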
| 889 | |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 890 | static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, |
| 891 | struct inet_cork *cork) |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 892 | { |
| 893 | struct sock *sk = skb->sk; |
| 894 | struct inet_sock *inet = inet_sk(sk); |
| 895 | struct udphdr *uh; |
Menglong Dong | cffb8f6 | 2020-11-06 01:42:40 -0500 | [diff] [blame] | 896 | int err; |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 897 | int is_udplite = IS_UDPLITE(sk); |
| 898 | int offset = skb_transport_offset(skb); |
| 899 | int len = skb->len - offset; |
Josh Hunt | 4094871 | 2019-10-02 13:29:23 -0400 | [diff] [blame] | 900 | int datalen = len - sizeof(*uh); |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 901 | __wsum csum = 0; |
| 902 | |
| 903 | /* |
| 904 | * Create a UDP header |
| 905 | */ |
| 906 | uh = udp_hdr(skb); |
| 907 | uh->source = inet->inet_sport; |
David S. Miller | 79ab053 | 2011-05-09 13:31:04 -0700 | [diff] [blame] | 908 | uh->dest = fl4->fl4_dport; |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 909 | uh->len = htons(len); |
| 910 | uh->check = 0; |
| 911 | |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 912 | if (cork->gso_size) { |
| 913 | const int hlen = skb_network_header_len(skb) + |
| 914 | sizeof(struct udphdr); |
| 915 | |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 916 | if (hlen + cork->gso_size > cork->fragsize) { |
| 917 | kfree_skb(skb); |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 918 | return -EINVAL; |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 919 | } |
Jianguo Wu | 158390e | 2021-12-08 18:03:33 +0800 | [diff] [blame] | 920 | if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 921 | kfree_skb(skb); |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 922 | return -EINVAL; |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 923 | } |
| 924 | if (sk->sk_no_check_tx) { |
| 925 | kfree_skb(skb); |
Willem de Bruijn | a8c744a | 2018-04-30 15:58:36 -0400 | [diff] [blame] | 926 | return -EINVAL; |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 927 | } |
Willem de Bruijn | ff06342 | 2018-05-22 11:34:39 -0400 | [diff] [blame] | 928 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 929 | dst_xfrm(skb_dst(skb))) { |
| 930 | kfree_skb(skb); |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 931 | return -EIO; |
Willem de Bruijn | 0f149c9 | 2019-01-15 11:40:02 -0500 | [diff] [blame] | 932 | } |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 933 | |
Josh Hunt | 4094871 | 2019-10-02 13:29:23 -0400 | [diff] [blame] | 934 | if (datalen > cork->gso_size) { |
| 935 | skb_shinfo(skb)->gso_size = cork->gso_size; |
| 936 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; |
| 937 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, |
| 938 | cork->gso_size); |
| 939 | } |
Willem de Bruijn | a8c744a | 2018-04-30 15:58:36 -0400 | [diff] [blame] | 940 | goto csum_partial; |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 941 | } |
| 942 | |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 943 | if (is_udplite) /* UDP-Lite */ |
| 944 | csum = udplite_csum(skb); |
| 945 | |
Willem de Bruijn | ab2fb7e | 2017-08-22 11:39:57 -0400 | [diff] [blame] | 946 | else if (sk->sk_no_check_tx) { /* UDP csum off */ |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 947 | |
| 948 | skb->ip_summed = CHECKSUM_NONE; |
| 949 | goto send; |
| 950 | |
| 951 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ |
Willem de Bruijn | a8c744a | 2018-04-30 15:58:36 -0400 | [diff] [blame] | 952 | csum_partial: |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 953 | |
David S. Miller | 79ab053 | 2011-05-09 13:31:04 -0700 | [diff] [blame] | 954 | udp4_hwcsum(skb, fl4->saddr, fl4->daddr); |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 955 | goto send; |
| 956 | |
| 957 | } else |
| 958 | csum = udp_csum(skb); |
| 959 | |
| 960 | /* add protocol-dependent pseudo-header */ |
David S. Miller | 79ab053 | 2011-05-09 13:31:04 -0700 | [diff] [blame] | 961 | uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 962 | sk->sk_protocol, csum); |
| 963 | if (uh->check == 0) |
| 964 | uh->check = CSUM_MANGLED_0; |
| 965 | |
| 966 | send: |
Eric Dumazet | b5ec8ee | 2012-08-10 02:22:47 +0000 | [diff] [blame] | 967 | err = ip_send_skb(sock_net(sk), skb); |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 968 | if (err) { |
| 969 | if (err == -ENOBUFS && !inet->recverr) { |
Eric Dumazet | 6aef70a | 2016-04-27 16:44:27 -0700 | [diff] [blame] | 970 | UDP_INC_STATS(sock_net(sk), |
| 971 | UDP_MIB_SNDBUFERRORS, is_udplite); |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 972 | err = 0; |
| 973 | } |
| 974 | } else |
Eric Dumazet | 6aef70a | 2016-04-27 16:44:27 -0700 | [diff] [blame] | 975 | UDP_INC_STATS(sock_net(sk), |
| 976 | UDP_MIB_OUTDATAGRAMS, is_udplite); |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 977 | return err; |
| 978 | } |
| 979 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 980 | /* |
| 981 | * Push out all pending data as one UDP datagram. Socket is locked. |
| 982 | */ |
Hannes Frederic Sowa | 8822b64 | 2013-07-01 20:21:30 +0200 | [diff] [blame] | 983 | int udp_push_pending_frames(struct sock *sk) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 984 | { |
| 985 | struct udp_sock *up = udp_sk(sk); |
| 986 | struct inet_sock *inet = inet_sk(sk); |
David S. Miller | b6f21b2 | 2011-03-12 02:09:18 -0500 | [diff] [blame] | 987 | struct flowi4 *fl4 = &inet->cork.fl.u.ip4; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 988 | struct sk_buff *skb; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 989 | int err = 0; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 990 | |
David S. Miller | 77968b7 | 2011-05-08 17:12:19 -0700 | [diff] [blame] | 991 | skb = ip_finish_skb(sk, fl4); |
Herbert Xu | f6b9664f | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 992 | if (!skb) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 993 | goto out; |
| 994 | |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 995 | err = udp_send_skb(skb, fl4, &inet->cork.base); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 996 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 997 | out: |
| 998 | up->len = 0; |
| 999 | up->pending = 0; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1000 | return err; |
| 1001 | } |
Hannes Frederic Sowa | 8822b64 | 2013-07-01 20:21:30 +0200 | [diff] [blame] | 1002 | EXPORT_SYMBOL(udp_push_pending_frames); |
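/*
 * Userspace view of the corking machinery above (illustrative sketch,
 * assuming Linux's UDP_CORK socket option and a connected socket):
 * while UDP_CORK is set, each send() only appends to the pending frame;
 * clearing the option pushes everything out as one datagram via
 * udp_push_pending_frames().
 */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

static int send_as_one_datagram(int fd, const void *a, size_t alen,
				const void *b, size_t blen)
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	send(fd, a, alen, 0);	/* appended to the corked frame */
	send(fd, b, blen, 0);	/* still the same pending datagram */
	/* uncork: this is what triggers udp_push_pending_frames() */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}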
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1003 | |
Willem de Bruijn | 2e8de85 | 2018-04-26 13:42:20 -0400 | [diff] [blame] | 1004 | static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size) |
| 1005 | { |
| 1006 | switch (cmsg->cmsg_type) { |
| 1007 | case UDP_SEGMENT: |
| 1008 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16))) |
| 1009 | return -EINVAL; |
| 1010 | *gso_size = *(__u16 *)CMSG_DATA(cmsg); |
| 1011 | return 0; |
| 1012 | default: |
| 1013 | return -EINVAL; |
| 1014 | } |
| 1015 | } |
| 1016 | |
| 1017 | int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size) |
| 1018 | { |
| 1019 | struct cmsghdr *cmsg; |
| 1020 | bool need_ip = false; |
| 1021 | int err; |
| 1022 | |
| 1023 | for_each_cmsghdr(cmsg, msg) { |
| 1024 | if (!CMSG_OK(msg, cmsg)) |
| 1025 | return -EINVAL; |
| 1026 | |
| 1027 | if (cmsg->cmsg_level != SOL_UDP) { |
| 1028 | need_ip = true; |
| 1029 | continue; |
| 1030 | } |
| 1031 | |
| 1032 | err = __udp_cmsg_send(cmsg, gso_size); |
| 1033 | if (err) |
| 1034 | return err; |
| 1035 | } |
| 1036 | |
| 1037 | return need_ip; |
| 1038 | } |
| 1039 | EXPORT_SYMBOL_GPL(udp_cmsg_send); |
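/*
 * Userspace sketch of the UDP_SEGMENT control message parsed above
 * (illustrative; assumes a kernel with UDP GSO support, v4.18+, a
 * connected socket, and UDP_SEGMENT from <netinet/udp.h> on newer
 * glibc). One large sendmsg() buffer is cut by the kernel into
 * datagrams carrying gso_size payload bytes each.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_SEGMENT */

#ifndef SOL_UDP
#define SOL_UDP IPPROTO_UDP
#endif

static ssize_t send_gso(int fd, const void *buf, size_t len,
			uint16_t gso_size)
{
	char ctrl[CMSG_SPACE(sizeof(uint16_t))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	/* exactly what __udp_cmsg_send() expects: a __u16 at SOL_UDP */
	cm->cmsg_level = SOL_UDP;
	cm->cmsg_type = UDP_SEGMENT;
	cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
	memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));
	return sendmsg(fd, &msg, 0);
}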
| 1040 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1041 | int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1042 | { |
| 1043 | struct inet_sock *inet = inet_sk(sk); |
| 1044 | struct udp_sock *up = udp_sk(sk); |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1045 | DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); |
David S. Miller | e474995 | 2011-05-08 16:38:45 -0700 | [diff] [blame] | 1046 | struct flowi4 fl4_stack; |
David S. Miller | b6f21b2 | 2011-03-12 02:09:18 -0500 | [diff] [blame] | 1047 | struct flowi4 *fl4; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1048 | int ulen = len; |
| 1049 | struct ipcm_cookie ipc; |
| 1050 | struct rtable *rt = NULL; |
| 1051 | int free = 0; |
| 1052 | int connected = 0; |
| 1053 | __be32 daddr, faddr, saddr; |
| 1054 | __be16 dport; |
| 1055 | u8 tos; |
| 1056 | int err, is_udplite = IS_UDPLITE(sk); |
Eric Dumazet | a9f5970 | 2021-09-27 17:29:24 -0700 | [diff] [blame] | 1057 | int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1058 | int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); |
Herbert Xu | 903ab86 | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 1059 | struct sk_buff *skb; |
Eric Dumazet | f6d8bd0 | 2011-04-21 09:45:37 +0000 | [diff] [blame] | 1060 | struct ip_options_data opt_copy; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1061 | |
| 1062 | if (len > 0xFFFF) |
| 1063 | return -EMSGSIZE; |
| 1064 | |
| 1065 | /* |
| 1066 | * Check the flags. |
| 1067 | */ |
| 1068 | |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 1069 | if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1070 | return -EOPNOTSUPP; |
| 1071 | |
Herbert Xu | 903ab86 | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 1072 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; |
| 1073 | |
David S. Miller | f5fca60 | 2011-05-08 17:24:10 -0700 | [diff] [blame] | 1074 | fl4 = &inet->cork.fl.u.ip4; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1075 | if (up->pending) { |
| 1076 | /* |
| 1077 | * There are pending frames. |
| 1078 | * The socket lock must be held while it's corked. |
| 1079 | */ |
| 1080 | lock_sock(sk); |
| 1081 | if (likely(up->pending)) { |
| 1082 | if (unlikely(up->pending != AF_INET)) { |
| 1083 | release_sock(sk); |
| 1084 | return -EINVAL; |
| 1085 | } |
| 1086 | goto do_append_data; |
| 1087 | } |
| 1088 | release_sock(sk); |
| 1089 | } |
| 1090 | ulen += sizeof(struct udphdr); |
| 1091 | |
| 1092 | /* |
| 1093 | * Get and verify the address. |
| 1094 | */ |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1095 | if (usin) { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1096 | if (msg->msg_namelen < sizeof(*usin)) |
| 1097 | return -EINVAL; |
| 1098 | if (usin->sin_family != AF_INET) { |
| 1099 | if (usin->sin_family != AF_UNSPEC) |
| 1100 | return -EAFNOSUPPORT; |
| 1101 | } |
| 1102 | |
| 1103 | daddr = usin->sin_addr.s_addr; |
| 1104 | dport = usin->sin_port; |
| 1105 | if (dport == 0) |
| 1106 | return -EINVAL; |
| 1107 | } else { |
| 1108 | if (sk->sk_state != TCP_ESTABLISHED) |
| 1109 | return -EDESTADDRREQ; |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 1110 | daddr = inet->inet_daddr; |
| 1111 | dport = inet->inet_dport; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1112 | /* Open fast path for connected socket. |
| 1113 | The route will not be used if at least one option is set.
| 1114 | */ |
| 1115 | connected = 1; |
| 1116 | } |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 1117 | |
Willem de Bruijn | 3517820 | 2018-07-06 10:12:54 -0400 | [diff] [blame] | 1118 | ipcm_init_sk(&ipc, inet); |
Eric Dumazet | 18a419b | 2021-06-30 09:42:44 -0700 | [diff] [blame] | 1119 | ipc.gso_size = READ_ONCE(up->gso_size); |
Daniel Borkmann | bf84a010 | 2013-04-14 08:08:13 +0000 | [diff] [blame] | 1120 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1121 | if (msg->msg_controllen) { |
Willem de Bruijn | 2e8de85 | 2018-04-26 13:42:20 -0400 | [diff] [blame] | 1122 | err = udp_cmsg_send(sk, msg, &ipc.gso_size); |
| 1123 | if (err > 0) |
| 1124 | err = ip_cmsg_send(sk, msg, &ipc, |
| 1125 | sk->sk_family == AF_INET6); |
| 1126 | if (unlikely(err < 0)) { |
Eric Dumazet | 9194830 | 2016-02-04 06:23:28 -0800 | [diff] [blame] | 1127 | kfree(ipc.opt); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1128 | return err; |
Eric Dumazet | 9194830 | 2016-02-04 06:23:28 -0800 | [diff] [blame] | 1129 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1130 | if (ipc.opt) |
| 1131 | free = 1; |
| 1132 | connected = 0; |
| 1133 | } |
Eric Dumazet | f6d8bd0 | 2011-04-21 09:45:37 +0000 | [diff] [blame] | 1134 | if (!ipc.opt) { |
| 1135 | struct ip_options_rcu *inet_opt; |
| 1136 | |
| 1137 | rcu_read_lock(); |
| 1138 | inet_opt = rcu_dereference(inet->inet_opt); |
| 1139 | if (inet_opt) { |
| 1140 | memcpy(&opt_copy, inet_opt, |
| 1141 | sizeof(*inet_opt) + inet_opt->opt.optlen); |
| 1142 | ipc.opt = &opt_copy.opt; |
| 1143 | } |
| 1144 | rcu_read_unlock(); |
| 1145 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1146 | |
Dave Marchevsky | 6fc88c3 | 2021-08-19 02:24:20 -0700 | [diff] [blame] | 1147 | if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) { |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 1148 | err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, |
| 1149 | (struct sockaddr *)usin, &ipc.addr); |
| 1150 | if (err) |
| 1151 | goto out_free; |
| 1152 | if (usin) { |
| 1153 | if (usin->sin_port == 0) { |
| 1154 | /* BPF program set invalid port. Reject it. */ |
| 1155 | err = -EINVAL; |
| 1156 | goto out_free; |
| 1157 | } |
| 1158 | daddr = usin->sin_addr.s_addr; |
| 1159 | dport = usin->sin_port; |
| 1160 | } |
| 1161 | } |
| 1162 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1163 | saddr = ipc.addr; |
| 1164 | ipc.addr = faddr = daddr; |
| 1165 | |
Eric Dumazet | f6d8bd0 | 2011-04-21 09:45:37 +0000 | [diff] [blame] | 1166 | if (ipc.opt && ipc.opt->opt.srr) { |
Andrey Ignatov | 1b97013 | 2018-05-10 10:59:34 -0700 | [diff] [blame] | 1167 | if (!daddr) { |
| 1168 | err = -EINVAL; |
| 1169 | goto out_free; |
| 1170 | } |
Eric Dumazet | f6d8bd0 | 2011-04-21 09:45:37 +0000 | [diff] [blame] | 1171 | faddr = ipc.opt->opt.faddr; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1172 | connected = 0; |
| 1173 | } |
Francesco Fusco | aa66158 | 2013-09-24 15:43:09 +0200 | [diff] [blame] | 1174 | tos = get_rttos(&ipc, inet); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1175 | if (sock_flag(sk, SOCK_LOCALROUTE) || |
| 1176 | (msg->msg_flags & MSG_DONTROUTE) || |
Eric Dumazet | f6d8bd0 | 2011-04-21 09:45:37 +0000 | [diff] [blame] | 1177 | (ipc.opt && ipc.opt->opt.is_strictroute)) { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1178 | tos |= RTO_ONLINK; |
| 1179 | connected = 0; |
| 1180 | } |
| 1181 | |
| 1182 | if (ipv4_is_multicast(daddr)) { |
Robert Shearman | 854da99 | 2018-10-01 09:40:23 +0100 | [diff] [blame] | 1183 | if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1184 | ipc.oif = inet->mc_index; |
| 1185 | if (!saddr) |
| 1186 | saddr = inet->mc_addr; |
| 1187 | connected = 0; |
David Ahern | 9515a2e | 2018-01-24 19:37:38 -0800 | [diff] [blame] | 1188 | } else if (!ipc.oif) { |
Erich E. Hoover | 76e2105 | 2012-02-08 09:11:07 +0000 | [diff] [blame] | 1189 | ipc.oif = inet->uc_index; |
David Ahern | 9515a2e | 2018-01-24 19:37:38 -0800 | [diff] [blame] | 1190 | } else if (ipv4_is_lbcast(daddr) && inet->uc_index) { |
| 1191 | /* oif is set, packet is to local broadcast and |
Randy Dunlap | 2bdcc73 | 2020-08-22 16:31:41 -0700 | [diff] [blame] | 1192 | * uc_index is set. oif is most likely set |
David Ahern | 9515a2e | 2018-01-24 19:37:38 -0800 | [diff] [blame] | 1193 | * by sk_bound_dev_if. If uc_index != oif check if the |
| 1194 | * oif is an L3 master and uc_index is an L3 slave. |
| 1195 | * If so, we want to allow the send using the uc_index. |
| 1196 | */ |
| 1197 | if (ipc.oif != inet->uc_index && |
| 1198 | ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk), |
| 1199 | inet->uc_index)) { |
| 1200 | ipc.oif = inet->uc_index; |
| 1201 | } |
| 1202 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1203 | |
| 1204 | if (connected) |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 1205 | rt = (struct rtable *)sk_dst_check(sk, 0); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1206 | |
Ian Morris | 51456b2 | 2015-04-03 09:17:26 +0100 | [diff] [blame] | 1207 | if (!rt) { |
Pavel Emelyanov | 84a3aa0 | 2008-07-16 20:19:08 -0700 | [diff] [blame] | 1208 | struct net *net = sock_net(sk); |
David Ahern | 9a24abf | 2015-08-13 14:59:03 -0600 | [diff] [blame] | 1209 | __u8 flow_flags = inet_sk_flowi_flags(sk); |
Pavel Emelyanov | 84a3aa0 | 2008-07-16 20:19:08 -0700 | [diff] [blame] | 1210 | |
David S. Miller | e474995 | 2011-05-08 16:38:45 -0700 | [diff] [blame] | 1211 | fl4 = &fl4_stack; |
David Ahern | 9a24abf | 2015-08-13 14:59:03 -0600 | [diff] [blame] | 1212 | |
Willem de Bruijn | c6af0c2 | 2019-09-11 15:50:51 -0400 | [diff] [blame] | 1213 | flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, |
David S. Miller | c0951cb | 2011-03-31 04:54:27 -0700 | [diff] [blame] | 1214 | RT_SCOPE_UNIVERSE, sk->sk_protocol, |
David Ahern | 9a24abf | 2015-08-13 14:59:03 -0600 | [diff] [blame] | 1215 | flow_flags, |
Lorenzo Colitti | e2d118a | 2016-11-04 02:23:43 +0900 | [diff] [blame] | 1216 | faddr, saddr, dport, inet->inet_sport, |
| 1217 | sk->sk_uid); |
David S. Miller | c0951cb | 2011-03-31 04:54:27 -0700 | [diff] [blame] | 1218 | |
Paul Moore | 3df98d7 | 2020-09-27 22:38:26 -0400 | [diff] [blame] | 1219 | security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); |
David S. Miller | e474995 | 2011-05-08 16:38:45 -0700 | [diff] [blame] | 1220 | rt = ip_route_output_flow(net, fl4, sk); |
David S. Miller | b23dd4f | 2011-03-02 14:31:35 -0800 | [diff] [blame] | 1221 | if (IS_ERR(rt)) { |
| 1222 | err = PTR_ERR(rt); |
David S. Miller | 06dc94b | 2011-03-03 10:38:01 -0800 | [diff] [blame] | 1223 | rt = NULL; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1224 | if (err == -ENETUNREACH) |
Eric Dumazet | f1d8cba | 2013-11-28 09:51:22 -0800 | [diff] [blame] | 1225 | IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1226 | goto out; |
| 1227 | } |
| 1228 | |
| 1229 | err = -EACCES; |
| 1230 | if ((rt->rt_flags & RTCF_BROADCAST) && |
| 1231 | !sock_flag(sk, SOCK_BROADCAST)) |
| 1232 | goto out; |
| 1233 | if (connected) |
Changli Gao | d8d1f30 | 2010-06-10 23:31:35 -0700 | [diff] [blame] | 1234 | sk_dst_set(sk, dst_clone(&rt->dst)); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1235 | } |
| 1236 | |
| 1237 | if (msg->msg_flags&MSG_CONFIRM) |
| 1238 | goto do_confirm; |
| 1239 | back_from_confirm: |
| 1240 | |
David S. Miller | e474995 | 2011-05-08 16:38:45 -0700 | [diff] [blame] | 1241 | saddr = fl4->saddr; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1242 | if (!ipc.addr) |
David S. Miller | e474995 | 2011-05-08 16:38:45 -0700 | [diff] [blame] | 1243 | daddr = ipc.addr = fl4->daddr; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1244 | |
Herbert Xu | 903ab86 | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 1245 | /* Lockless fast path for the non-corking case. */ |
| 1246 | if (!corkreq) { |
Willem de Bruijn | 1cd7884 | 2018-04-26 13:42:15 -0400 | [diff] [blame] | 1247 | struct inet_cork cork; |
| 1248 | |
Al Viro | f69e6d1 | 2014-11-24 13:23:40 -0500 | [diff] [blame] | 1249 | skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, |
Herbert Xu | 903ab86 | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 1250 | sizeof(struct udphdr), &ipc, &rt, |
Willem de Bruijn | 1cd7884 | 2018-04-26 13:42:15 -0400 | [diff] [blame] | 1251 | &cork, msg->msg_flags); |
Herbert Xu | 903ab86 | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 1252 | err = PTR_ERR(skb); |
YOSHIFUJI Hideaki / 吉藤英明 | 50c3a48 | 2013-01-22 06:32:49 +0000 | [diff] [blame] | 1253 | if (!IS_ERR_OR_NULL(skb)) |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 1254 | err = udp_send_skb(skb, fl4, &cork); |
Herbert Xu | 903ab86 | 2011-03-01 02:36:48 +0000 | [diff] [blame] | 1255 | goto out; |
| 1256 | } |
| 1257 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1258 | lock_sock(sk); |
| 1259 | if (unlikely(up->pending)) { |
| 1260 | /* The socket is already corked while preparing it. */ |
| 1261 | /* ... which is an evident application bug. --ANK */ |
| 1262 | release_sock(sk); |
| 1263 | |
Matteo Croce | 197df02 | 2017-10-19 14:22:17 +0200 | [diff] [blame] | 1264 | net_dbg_ratelimited("socket already corked\n"); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1265 | err = -EINVAL; |
| 1266 | goto out; |
| 1267 | } |
| 1268 | /* |
| 1269 | * Now cork the socket to buffer the pending data.
| 1270 | */ |
David S. Miller | b6f21b2 | 2011-03-12 02:09:18 -0500 | [diff] [blame] | 1271 | fl4 = &inet->cork.fl.u.ip4; |
| 1272 | fl4->daddr = daddr; |
| 1273 | fl4->saddr = saddr; |
David S. Miller | 9cce96d | 2011-03-12 03:00:33 -0500 | [diff] [blame] | 1274 | fl4->fl4_dport = dport; |
| 1275 | fl4->fl4_sport = inet->inet_sport; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1276 | up->pending = AF_INET; |
| 1277 | |
| 1278 | do_append_data: |
| 1279 | up->len += ulen; |
Al Viro | f69e6d1 | 2014-11-24 13:23:40 -0500 | [diff] [blame] | 1280 | err = ip_append_data(sk, fl4, getfrag, msg, ulen, |
David S. Miller | f5fca60 | 2011-05-08 17:24:10 -0700 | [diff] [blame] | 1281 | sizeof(struct udphdr), &ipc, &rt, |
| 1282 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1283 | if (err) |
| 1284 | udp_flush_pending_frames(sk); |
| 1285 | else if (!corkreq) |
| 1286 | err = udp_push_pending_frames(sk); |
| 1287 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) |
| 1288 | up->pending = 0; |
| 1289 | release_sock(sk); |
| 1290 | |
| 1291 | out: |
| 1292 | ip_rt_put(rt); |
Andrey Ignatov | 1b97013 | 2018-05-10 10:59:34 -0700 | [diff] [blame] | 1293 | out_free: |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1294 | if (free) |
| 1295 | kfree(ipc.opt); |
| 1296 | if (!err) |
| 1297 | return len; |
| 1298 | /* |
| 1299 | * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting |
| 1300 | * ENOBUFS might not be good (it's not tunable per se), but otherwise |
| 1301 | * we don't have a good statistic (IpOutDiscards, but it can mean too many
| 1302 | * things). We could add another new stat, but at least for now that
| 1303 | * seems like overkill. |
| 1304 | */ |
| 1305 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
Eric Dumazet | 6aef70a | 2016-04-27 16:44:27 -0700 | [diff] [blame] | 1306 | UDP_INC_STATS(sock_net(sk), |
| 1307 | UDP_MIB_SNDBUFERRORS, is_udplite); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1308 | } |
| 1309 | return err; |
| 1310 | |
| 1311 | do_confirm: |
Julian Anastasov | 0dec879 | 2017-02-06 23:14:16 +0200 | [diff] [blame] | 1312 | if (msg->msg_flags & MSG_PROBE) |
| 1313 | dst_confirm_neigh(&rt->dst, &fl4->daddr); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1314 | if (!(msg->msg_flags&MSG_PROBE) || len) |
| 1315 | goto back_from_confirm; |
| 1316 | err = 0; |
| 1317 | goto out; |
| 1318 | } |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 1319 | EXPORT_SYMBOL(udp_sendmsg); |
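/*
 * Userspace note on the 'connected' fast path above (illustrative
 * sketch): connect() on a UDP socket pins the destination, so
 * udp_sendmsg() can reuse the route cached by sk_dst_check() instead
 * of doing a full ip_route_output_flow() lookup per packet.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static ssize_t send_connected(int fd, const struct sockaddr_in *dst,
			      const void *buf, size_t len)
{
	if (connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0)
		return -1;
	/* no address given: udp_sendmsg() takes the connected path */
	return send(fd, buf, len, 0);
}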
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1320 | |
| 1321 | int udp_sendpage(struct sock *sk, struct page *page, int offset, |
| 1322 | size_t size, int flags) |
| 1323 | { |
David S. Miller | f5fca60 | 2011-05-08 17:24:10 -0700 | [diff] [blame] | 1324 | struct inet_sock *inet = inet_sk(sk); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1325 | struct udp_sock *up = udp_sk(sk); |
| 1326 | int ret; |
| 1327 | |
Shawn Landden | d3f7d56 | 2013-11-24 22:36:28 -0800 | [diff] [blame] | 1328 | if (flags & MSG_SENDPAGE_NOTLAST) |
| 1329 | flags |= MSG_MORE; |
| 1330 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1331 | if (!up->pending) { |
| 1332 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; |
| 1333 | |
| 1334 | /* Call udp_sendmsg to specify the destination address, which
| 1335 | * the sendpage interface can't pass.
| 1336 | * This will succeed only when the socket is connected. |
| 1337 | */ |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1338 | ret = udp_sendmsg(sk, &msg, 0); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1339 | if (ret < 0) |
| 1340 | return ret; |
| 1341 | } |
| 1342 | |
| 1343 | lock_sock(sk); |
| 1344 | |
| 1345 | if (unlikely(!up->pending)) { |
| 1346 | release_sock(sk); |
| 1347 | |
Matteo Croce | 197df02 | 2017-10-19 14:22:17 +0200 | [diff] [blame] | 1348 | net_dbg_ratelimited("cork failed\n"); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1349 | return -EINVAL; |
| 1350 | } |
| 1351 | |
David S. Miller | f5fca60 | 2011-05-08 17:24:10 -0700 | [diff] [blame] | 1352 | ret = ip_append_page(sk, &inet->cork.fl.u.ip4, |
| 1353 | page, offset, size, flags); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1354 | if (ret == -EOPNOTSUPP) { |
| 1355 | release_sock(sk); |
| 1356 | return sock_no_sendpage(sk->sk_socket, page, offset, |
| 1357 | size, flags); |
| 1358 | } |
| 1359 | if (ret < 0) { |
| 1360 | udp_flush_pending_frames(sk); |
| 1361 | goto out; |
| 1362 | } |
| 1363 | |
| 1364 | up->len += size; |
Eric Dumazet | a9f5970 | 2021-09-27 17:29:24 -0700 | [diff] [blame] | 1365 | if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE))) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1366 | ret = udp_push_pending_frames(sk); |
| 1367 | if (!ret) |
| 1368 | ret = size; |
| 1369 | out: |
| 1370 | release_sock(sk); |
| 1371 | return ret; |
| 1372 | } |
| 1373 | |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1374 | #define UDP_SKB_IS_STATELESS 0x80000000 |
| 1375 | |
Florian Westphal | 677bf08 | 2019-11-21 06:56:23 +0100 | [diff] [blame] | 1376 | /* all head states (dst, sk, nf conntrack) except skb extensions are |
| 1377 | * cleared by udp_rcv(). |
| 1378 | * |
| 1379 | * We need to preserve secpath, if present, to eventually process |
| 1380 | * IP_CMSG_PASSSEC at recvmsg() time. |
| 1381 | * |
| 1382 | * Other extensions can be cleared. |
| 1383 | */ |
| 1384 | static bool udp_try_make_stateless(struct sk_buff *skb) |
| 1385 | { |
| 1386 | if (!skb_has_extensions(skb)) |
| 1387 | return true; |
| 1388 | |
| 1389 | if (!secpath_exists(skb)) { |
| 1390 | skb_ext_reset(skb); |
| 1391 | return true; |
| 1392 | } |
| 1393 | |
| 1394 | return false; |
| 1395 | } |
| 1396 | |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1397 | static void udp_set_dev_scratch(struct sk_buff *skb) |
| 1398 | { |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1399 | struct udp_dev_scratch *scratch = udp_skb_scratch(skb); |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1400 | |
| 1401 | BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long)); |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1402 | scratch->_tsize_state = skb->truesize; |
| 1403 | #if BITS_PER_LONG == 64 |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1404 | scratch->len = skb->len; |
| 1405 | scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); |
| 1406 | scratch->is_linear = !skb_is_nonlinear(skb); |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1407 | #endif |
Florian Westphal | 677bf08 | 2019-11-21 06:56:23 +0100 | [diff] [blame] | 1408 | if (udp_try_make_stateless(skb)) |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1409 | scratch->_tsize_state |= UDP_SKB_IS_STATELESS; |
| 1410 | } |
| 1411 | |
Eric Dumazet | a793183 | 2019-10-24 11:43:31 -0700 | [diff] [blame] | 1412 | static void udp_skb_csum_unnecessary_set(struct sk_buff *skb) |
| 1413 | { |
| 1414 | /* We come here after udp_lib_checksum_complete() returned 0. |
| 1415 | * This means that __skb_checksum_complete() might have |
| 1416 | * set skb->csum_valid to 1. |
| 1417 | * On 64bit platforms, we can set csum_unnecessary |
| 1418 | * to true, but only if the skb is not shared. |
| 1419 | */ |
| 1420 | #if BITS_PER_LONG == 64 |
| 1421 | if (!skb_shared(skb)) |
| 1422 | udp_skb_scratch(skb)->csum_unnecessary = true; |
| 1423 | #endif |
| 1424 | } |
| 1425 | |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1426 | static int udp_skb_truesize(struct sk_buff *skb) |
| 1427 | { |
| 1428 | return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS; |
| 1429 | } |
| 1430 | |
| 1431 | static bool udp_skb_has_head_state(struct sk_buff *skb) |
| 1432 | { |
| 1433 | return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS); |
| 1434 | } |
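/*
 * Worked example of the scratch encoding above (illustrative):
 * skb->truesize always fits in 31 bits, so the top bit of _tsize_state
 * is free to carry the stateless flag. For truesize = 2304 (0x900) on
 * a stateless skb:
 *
 *   _tsize_state = 0x900 | UDP_SKB_IS_STATELESS = 0x80000900
 *   udp_skb_truesize()       -> 0x80000900 & ~0x80000000 = 2304
 *   udp_skb_has_head_state() -> false (the flag bit is set)
 */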
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1435 | |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1436 | /* fully reclaim rmem/fwd memory allocated for skb */ |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1437 | static void udp_rmem_release(struct sock *sk, int size, int partial, |
| 1438 | bool rx_queue_lock_held) |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1439 | { |
Eric Dumazet | 6b229cf | 2016-12-08 11:41:56 -0800 | [diff] [blame] | 1440 | struct udp_sock *up = udp_sk(sk); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1441 | struct sk_buff_head *sk_queue; |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1442 | int amt; |
| 1443 | |
Eric Dumazet | 6b229cf | 2016-12-08 11:41:56 -0800 | [diff] [blame] | 1444 | if (likely(partial)) { |
| 1445 | up->forward_deficit += size; |
| 1446 | size = up->forward_deficit; |
Paolo Abeni | d39ca25 | 2020-01-21 16:50:49 +0100 | [diff] [blame] | 1447 | if (size < (sk->sk_rcvbuf >> 2) && |
| 1448 | !skb_queue_empty(&up->reader_queue)) |
Eric Dumazet | 6b229cf | 2016-12-08 11:41:56 -0800 | [diff] [blame] | 1449 | return; |
| 1450 | } else { |
| 1451 | size += up->forward_deficit; |
| 1452 | } |
| 1453 | up->forward_deficit = 0; |
| 1454 | |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1455 | /* acquire the sk_receive_queue for fwd allocated memory scheduling, |
| 1456 | * if the caller doesn't hold it already
| 1457 | */ |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1458 | sk_queue = &sk->sk_receive_queue; |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1459 | if (!rx_queue_lock_held) |
| 1460 | spin_lock(&sk_queue->lock); |
| 1461 | |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1463 | sk->sk_forward_alloc += size; |
| 1464 | amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1); |
| 1465 | sk->sk_forward_alloc -= amt; |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1466 | |
| 1467 | if (amt) |
| 1468 | __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT); |
Eric Dumazet | 02ab0d1 | 2016-12-08 11:41:57 -0800 | [diff] [blame] | 1469 | |
| 1470 | atomic_sub(size, &sk->sk_rmem_alloc); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1471 | |
| 1472 | /* this can save us from acquiring the rx queue lock on next receive */ |
| 1473 | skb_queue_splice_tail_init(sk_queue, &up->reader_queue); |
| 1474 | |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1475 | if (!rx_queue_lock_held) |
| 1476 | spin_unlock(&sk_queue->lock); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1477 | } |
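/*
 * Worked example of the partial-release batching above (illustrative;
 * assumes a typical default sk_rcvbuf of 212992 bytes): the threshold
 * is sk_rcvbuf >> 2 = 53248, so a partial release only accumulates in
 * up->forward_deficit until the deficit exceeds a quarter of the
 * receive buffer (or the reader queue drains), and then one pass under
 * the lock returns the whole batch at once.
 */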
| 1478 | |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1479 | /* Note: called with reader_queue.lock held. |
Eric Dumazet | c84d949 | 2016-12-08 11:41:55 -0800 | [diff] [blame] | 1480 | * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch.
| 1481 | * This avoids a cache line miss while the receive_queue lock is held.
| 1482 | * Look at __udp_enqueue_schedule_skb() to find where this copy is done. |
| 1483 | */ |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1484 | void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1485 | { |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1486 | prefetch(&skb->data); |
| 1487 | udp_rmem_release(sk, udp_skb_truesize(skb), 1, false); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1488 | } |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1489 | EXPORT_SYMBOL(udp_skb_destructor); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1490 | |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1491 | /* as above, but the caller held the rx queue lock, too */ |
Colin Ian King | 64f5102 | 2017-05-17 09:50:36 +0100 | [diff] [blame] | 1492 | static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb) |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1493 | { |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1494 | prefetch(&skb->data); |
| 1495 | udp_rmem_release(sk, udp_skb_truesize(skb), 1, true); |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1496 | } |
| 1497 | |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 1498 | /* The idea of busylocks is to let producers grab an extra spinlock
| 1499 | * to relieve pressure on the receive_queue spinlock shared with the consumer.
| 1500 | * Under flood, this means that only one producer can be in line
| 1501 | * trying to acquire the receive_queue spinlock.
| 1502 | * These busylocks are allocated on a per-cpu basis, instead of a
| 1503 | * per-socket one (which would consume a cache line per socket).
| 1504 | */
| 1505 | static int udp_busylocks_log __read_mostly; |
| 1506 | static spinlock_t *udp_busylocks __read_mostly; |
| 1507 | |
| 1508 | static spinlock_t *busylock_acquire(void *ptr) |
| 1509 | { |
| 1510 | spinlock_t *busy; |
| 1511 | |
| 1512 | busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log); |
| 1513 | spin_lock(busy); |
| 1514 | return busy; |
| 1515 | } |
| 1516 | |
| 1517 | static void busylock_release(spinlock_t *busy) |
| 1518 | { |
| 1519 | if (busy) |
| 1520 | spin_unlock(busy); |
| 1521 | } |
| 1522 | |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1523 | int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) |
| 1524 | { |
| 1525 | struct sk_buff_head *list = &sk->sk_receive_queue; |
| 1526 | int rmem, delta, amt, err = -ENOMEM; |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 1527 | spinlock_t *busy = NULL; |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 1528 | int size; |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1529 | |
| 1530 | /* try to avoid the costly atomic add/sub pair when the receive |
| 1531 | * queue is full; always allow at least one packet
| 1532 | */ |
| 1533 | rmem = atomic_read(&sk->sk_rmem_alloc); |
Paolo Abeni | 363dc73 | 2016-12-02 17:35:49 +0100 | [diff] [blame] | 1534 | if (rmem > sk->sk_rcvbuf) |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1535 | goto drop; |
| 1536 | |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 1537 | /* Under memory pressure, it can help udp_recvmsg() to work
| 1538 | * with linear skbs:
| 1539 | * - Reduce memory overhead and thus increase receive queue capacity |
| 1540 | * - Less cache line misses at copyout() time |
| 1541 | * - Less work at consume_skb() (less alien page frag freeing) |
| 1542 | */ |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 1543 | if (rmem > (sk->sk_rcvbuf >> 1)) { |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 1544 | skb_condense(skb); |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 1545 | |
| 1546 | busy = busylock_acquire(sk); |
| 1547 | } |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 1548 | size = skb->truesize; |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1549 | udp_set_dev_scratch(skb); |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 1550 | |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1551 | /* we drop only if the receive buf is full and the receive |
| 1552 | * queue contains some other skb |
| 1553 | */ |
| 1554 | rmem = atomic_add_return(size, &sk->sk_rmem_alloc); |
Antonio Messina | feed8a4 | 2019-12-19 15:08:03 +0100 | [diff] [blame] | 1555 | if (rmem > (size + (unsigned int)sk->sk_rcvbuf)) |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1556 | goto uncharge_drop; |
| 1557 | |
| 1558 | spin_lock(&list->lock); |
| 1559 | if (size >= sk->sk_forward_alloc) { |
| 1560 | amt = sk_mem_pages(size); |
| 1561 | delta = amt << SK_MEM_QUANTUM_SHIFT; |
| 1562 | if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) { |
| 1563 | err = -ENOBUFS; |
| 1564 | spin_unlock(&list->lock); |
| 1565 | goto uncharge_drop; |
| 1566 | } |
| 1567 | |
| 1568 | sk->sk_forward_alloc += delta; |
| 1569 | } |
| 1570 | |
| 1571 | sk->sk_forward_alloc -= size; |
| 1572 | |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1573 | /* no need to set up a destructor; we will explicitly release the
| 1574 | * forward allocated memory on dequeue |
| 1575 | */ |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1576 | sock_skb_set_dropcount(sk, skb); |
| 1577 | |
| 1578 | __skb_queue_tail(list, skb); |
| 1579 | spin_unlock(&list->lock); |
| 1580 | |
| 1581 | if (!sock_flag(sk, SOCK_DEAD)) |
| 1582 | sk->sk_data_ready(sk); |
| 1583 | |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 1584 | busylock_release(busy); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1585 | return 0; |
| 1586 | |
| 1587 | uncharge_drop: |
| 1588 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 1589 | |
| 1590 | drop: |
| 1591 | atomic_inc(&sk->sk_drops); |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 1592 | busylock_release(busy); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1593 | return err; |
| 1594 | } |
| 1595 | EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb); |
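/*
 * Worked example of the scheduling math above (illustrative; assumes
 * SK_MEM_QUANTUM == PAGE_SIZE == 4096): for an skb with truesize 2304
 * arriving while sk_forward_alloc == 0, sk_mem_pages(2304) == 1, so
 * delta = 1 << SK_MEM_QUANTUM_SHIFT = 4096 is raised from the protocol
 * memory accounting; sk_forward_alloc becomes 4096, and after charging
 * the skb, 4096 - 2304 = 1792 bytes remain pre-approved for the next
 * packet without touching the global counters.
 */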
| 1596 | |
Paolo Abeni | c915fe1 | 2016-11-15 16:37:53 +0100 | [diff] [blame] | 1597 | void udp_destruct_sock(struct sock *sk) |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1598 | { |
| 1599 | /* completely reclaim the forward-allocated memory */
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1600 | struct udp_sock *up = udp_sk(sk); |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1601 | unsigned int total = 0; |
| 1602 | struct sk_buff *skb; |
| 1603 | |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1604 | skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue); |
| 1605 | while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) { |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1606 | total += skb->truesize; |
| 1607 | kfree_skb(skb); |
| 1608 | } |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1609 | udp_rmem_release(sk, total, 0, true); |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1610 | |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1611 | inet_sock_destruct(sk); |
| 1612 | } |
Paolo Abeni | c915fe1 | 2016-11-15 16:37:53 +0100 | [diff] [blame] | 1613 | EXPORT_SYMBOL_GPL(udp_destruct_sock); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1614 | |
| 1615 | int udp_init_sock(struct sock *sk) |
| 1616 | { |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1617 | skb_queue_head_init(&udp_sk(sk)->reader_queue); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1618 | sk->sk_destruct = udp_destruct_sock; |
| 1619 | return 0; |
| 1620 | } |
| 1621 | EXPORT_SYMBOL_GPL(udp_init_sock); |
| 1622 | |
| 1623 | void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) |
| 1624 | { |
| 1625 | if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) { |
| 1626 | bool slow = lock_sock_fast(sk); |
| 1627 | |
| 1628 | sk_peek_offset_bwd(sk, len); |
| 1629 | unlock_sock_fast(sk, slow); |
| 1630 | } |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 1631 | |
Paolo Abeni | ca2c141 | 2017-09-06 14:44:36 +0200 | [diff] [blame] | 1632 | if (!skb_unref(skb)) |
| 1633 | return; |
| 1634 | |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1635 | /* In the more common cases we cleared the head states previously, |
| 1636 | * see __udp_queue_rcv_skb(). |
Paolo Abeni | 0ddf3fb | 2017-07-18 11:57:55 +0200 | [diff] [blame] | 1637 | */ |
Paolo Abeni | dce4551 | 2017-07-25 17:57:47 +0200 | [diff] [blame] | 1638 | if (unlikely(udp_skb_has_head_state(skb))) |
Paolo Abeni | 0ddf3fb | 2017-07-18 11:57:55 +0200 | [diff] [blame] | 1639 | skb_release_head_state(skb); |
Paolo Abeni | ca2c141 | 2017-09-06 14:44:36 +0200 | [diff] [blame] | 1640 | __consume_stateless_skb(skb); |
Paolo Abeni | f970bd9 | 2016-10-21 13:55:46 +0200 | [diff] [blame] | 1641 | } |
| 1642 | EXPORT_SYMBOL_GPL(skb_consume_udp); |
| 1643 | |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1644 | static struct sk_buff *__first_packet_length(struct sock *sk, |
| 1645 | struct sk_buff_head *rcvq, |
| 1646 | int *total) |
| 1647 | { |
| 1648 | struct sk_buff *skb; |
| 1649 | |
Paolo Abeni | 9bd780f | 2017-06-23 14:19:51 +0200 | [diff] [blame] | 1650 | while ((skb = skb_peek(rcvq)) != NULL) { |
| 1651 | if (udp_lib_checksum_complete(skb)) { |
| 1652 | __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, |
| 1653 | IS_UDPLITE(sk)); |
| 1654 | __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, |
| 1655 | IS_UDPLITE(sk)); |
| 1656 | atomic_inc(&sk->sk_drops); |
| 1657 | __skb_unlink(skb, rcvq); |
| 1658 | *total += skb->truesize; |
| 1659 | kfree_skb(skb); |
| 1660 | } else { |
Eric Dumazet | a793183 | 2019-10-24 11:43:31 -0700 | [diff] [blame] | 1661 | udp_skb_csum_unnecessary_set(skb); |
Paolo Abeni | 9bd780f | 2017-06-23 14:19:51 +0200 | [diff] [blame] | 1662 | break; |
| 1663 | } |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1664 | } |
| 1665 | return skb; |
| 1666 | } |
| 1667 | |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1668 | /** |
| 1669 | * first_packet_length - return length of first packet in receive queue |
| 1670 | * @sk: socket |
| 1671 | * |
| 1672 | * Drops all bad checksum frames, until a valid one is found. |
Eric Dumazet | e83c674 | 2016-08-23 13:59:33 -0700 | [diff] [blame] | 1673 | * Returns the length of the found skb, or -1 if none is found.
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1674 | */ |
Eric Dumazet | e83c674 | 2016-08-23 13:59:33 -0700 | [diff] [blame] | 1675 | static int first_packet_length(struct sock *sk) |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1676 | { |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1677 | struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; |
| 1678 | struct sk_buff_head *sk_queue = &sk->sk_receive_queue; |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1679 | struct sk_buff *skb; |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1680 | int total = 0; |
Eric Dumazet | e83c674 | 2016-08-23 13:59:33 -0700 | [diff] [blame] | 1681 | int res; |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1682 | |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1683 | spin_lock_bh(&rcvq->lock); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1684 | skb = __first_packet_length(sk, rcvq, &total); |
Eric Dumazet | 137a0db | 2019-10-23 22:44:49 -0700 | [diff] [blame] | 1685 | if (!skb && !skb_queue_empty_lockless(sk_queue)) { |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1686 | spin_lock(&sk_queue->lock); |
| 1687 | skb_queue_splice_tail_init(sk_queue, rcvq); |
| 1688 | spin_unlock(&sk_queue->lock); |
| 1689 | |
| 1690 | skb = __first_packet_length(sk, rcvq, &total); |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1691 | } |
Eric Dumazet | e83c674 | 2016-08-23 13:59:33 -0700 | [diff] [blame] | 1692 | res = skb ? skb->len : -1; |
Paolo Abeni | 7c13f97 | 2016-11-04 11:28:59 +0100 | [diff] [blame] | 1693 | if (total) |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1694 | udp_rmem_release(sk, total, 1, false); |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1695 | spin_unlock_bh(&rcvq->lock); |
Eric Dumazet | 8558467 | 2009-10-09 04:43:40 +0000 | [diff] [blame] | 1696 | return res; |
| 1697 | } |
| 1698 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | /* |
| 1700 | * IOCTL requests applicable to the UDP protocol |
| 1701 | */ |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1702 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
| 1704 | { |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1705 | switch (cmd) { |
| 1706 | case SIOCOUTQ: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1707 | { |
Eric Dumazet | 31e6d36 | 2009-06-17 19:05:41 -0700 | [diff] [blame] | 1708 | int amount = sk_wmem_alloc_get(sk); |
| 1709 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1710 | return put_user(amount, (int __user *)arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | } |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1712 | |
| 1713 | case SIOCINQ: |
| 1714 | { |
Eric Dumazet | e83c674 | 2016-08-23 13:59:33 -0700 | [diff] [blame] | 1715 | int amount = max_t(int, 0, first_packet_length(sk)); |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1716 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1717 | return put_user(amount, (int __user *)arg); |
| 1718 | } |
| 1719 | |
| 1720 | default: |
| 1721 | return -ENOIOCTLCMD; |
| 1722 | } |
| 1723 | |
| 1724 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | } |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 1726 | EXPORT_SYMBOL(udp_ioctl); |
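/*
 * Userspace use of the ioctls above (illustrative sketch): SIOCINQ on
 * a UDP socket reports the length of the next pending datagram (via
 * first_packet_length()), SIOCOUTQ the bytes still queued for transmit.
 */
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static int next_datagram_len(int fd)
{
	int n = 0;

	return ioctl(fd, SIOCINQ, &n) < 0 ? -1 : n;
}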
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1728 | struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, |
Paolo Abeni | fd69c39 | 2019-04-08 10:15:59 +0200 | [diff] [blame] | 1729 | int noblock, int *off, int *err) |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1730 | { |
| 1731 | struct sk_buff_head *sk_queue = &sk->sk_receive_queue; |
| 1732 | struct sk_buff_head *queue; |
| 1733 | struct sk_buff *last; |
| 1734 | long timeo; |
| 1735 | int error; |
| 1736 | |
| 1737 | queue = &udp_sk(sk)->reader_queue; |
| 1738 | flags |= noblock ? MSG_DONTWAIT : 0; |
| 1739 | timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
| 1740 | do { |
| 1741 | struct sk_buff *skb; |
| 1742 | |
| 1743 | error = sock_error(sk); |
| 1744 | if (error) |
| 1745 | break; |
| 1746 | |
| 1747 | error = -EAGAIN; |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1748 | do { |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1749 | spin_lock_bh(&queue->lock); |
Paolo Abeni | e427cad | 2020-02-28 14:45:22 +0100 | [diff] [blame] | 1750 | skb = __skb_try_recv_from_queue(sk, queue, flags, off, |
| 1751 | err, &last); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1752 | if (skb) { |
Paolo Abeni | e427cad | 2020-02-28 14:45:22 +0100 | [diff] [blame] | 1753 | if (!(flags & MSG_PEEK)) |
| 1754 | udp_skb_destructor(sk, skb); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1755 | spin_unlock_bh(&queue->lock); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1756 | return skb; |
| 1757 | } |
| 1758 | |
Eric Dumazet | 137a0db | 2019-10-23 22:44:49 -0700 | [diff] [blame] | 1759 | if (skb_queue_empty_lockless(sk_queue)) { |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1760 | spin_unlock_bh(&queue->lock); |
| 1761 | goto busy_check; |
| 1762 | } |
| 1763 | |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1764 | /* refill the reader queue and walk it again;
| 1765 | * keep both queues locked to avoid re-acquiring |
| 1766 | * the sk_receive_queue lock if fwd memory scheduling |
| 1767 | * is needed. |
| 1768 | */ |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1769 | spin_lock(&sk_queue->lock); |
| 1770 | skb_queue_splice_tail_init(sk_queue, queue); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1771 | |
Paolo Abeni | e427cad | 2020-02-28 14:45:22 +0100 | [diff] [blame] | 1772 | skb = __skb_try_recv_from_queue(sk, queue, flags, off, |
| 1773 | err, &last); |
| 1774 | if (skb && !(flags & MSG_PEEK)) |
| 1775 | udp_skb_dtor_locked(sk, skb); |
Paolo Abeni | 6dfb436 | 2017-05-16 11:20:15 +0200 | [diff] [blame] | 1776 | spin_unlock(&sk_queue->lock); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1777 | spin_unlock_bh(&queue->lock); |
Andrey Vagin | de321ed | 2017-05-17 11:39:05 -0700 | [diff] [blame] | 1778 | if (skb) |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1779 | return skb; |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1780 | |
| 1781 | busy_check: |
| 1782 | if (!sk_can_busy_loop(sk)) |
| 1783 | break; |
| 1784 | |
| 1785 | sk_busy_loop(sk, flags & MSG_DONTWAIT); |
Eric Dumazet | 137a0db | 2019-10-23 22:44:49 -0700 | [diff] [blame] | 1786 | } while (!skb_queue_empty_lockless(sk_queue)); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1787 | |
| 1788 | /* sk_queue is empty, reader_queue may contain peeked packets */ |
| 1789 | } while (timeo && |
Sabrina Dubroca | b50b058 | 2019-11-25 14:48:57 +0100 | [diff] [blame] | 1790 | !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, |
| 1791 | &error, &timeo, |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1792 | (struct sk_buff *)sk_queue)); |
| 1793 | |
| 1794 | *err = error; |
| 1795 | return NULL; |
| 1796 | } |
Jiri Kosina | 7e82364 | 2018-10-04 13:37:32 +0200 | [diff] [blame] | 1797 | EXPORT_SYMBOL(__skb_recv_udp); |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1798 | |
Cong Wang | d7f5711 | 2021-03-30 19:32:32 -0700 | [diff] [blame] | 1799 | int udp_read_sock(struct sock *sk, read_descriptor_t *desc, |
| 1800 | sk_read_actor_t recv_actor) |
| 1801 | { |
| 1802 | int copied = 0; |
| 1803 | |
| 1804 | while (1) { |
| 1805 | struct sk_buff *skb; |
| 1806 | int err, used; |
| 1807 | |
| 1808 | skb = skb_recv_udp(sk, 0, 1, &err); |
| 1809 | if (!skb) |
| 1810 | return err; |
Cong Wang | 099f896 | 2021-11-14 20:40:06 -0800 | [diff] [blame] | 1811 | |
| 1812 | if (udp_lib_checksum_complete(skb)) { |
| 1813 | __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, |
| 1814 | IS_UDPLITE(sk)); |
| 1815 | __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, |
| 1816 | IS_UDPLITE(sk)); |
| 1817 | atomic_inc(&sk->sk_drops); |
| 1818 | kfree_skb(skb); |
| 1819 | continue; |
| 1820 | } |
| 1821 | |
Cong Wang | d7f5711 | 2021-03-30 19:32:32 -0700 | [diff] [blame] | 1822 | used = recv_actor(desc, skb, 0, skb->len); |
| 1823 | if (used <= 0) { |
| 1824 | if (!copied) |
| 1825 | copied = used; |
Cong Wang | e00a5c3 | 2021-06-14 19:13:37 -0700 | [diff] [blame] | 1826 | kfree_skb(skb); |
Cong Wang | d7f5711 | 2021-03-30 19:32:32 -0700 | [diff] [blame] | 1827 | break; |
| 1828 | } else if (used <= skb->len) { |
| 1829 | copied += used; |
| 1830 | } |
| 1831 | |
Cong Wang | e00a5c3 | 2021-06-14 19:13:37 -0700 | [diff] [blame] | 1832 | kfree_skb(skb); |
Cong Wang | d7f5711 | 2021-03-30 19:32:32 -0700 | [diff] [blame] | 1833 | if (!desc->count) |
| 1834 | break; |
| 1835 | } |
| 1836 | |
| 1837 | return copied; |
| 1838 | } |
| 1839 | EXPORT_SYMBOL(udp_read_sock); |
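/*
 * Usage sketch (illustrative only, not built): a minimal recv_actor
 * suitable for udp_read_sock().  The "example_*" names below are
 * hypothetical; only the sk_read_actor_t contract is assumed: return
 * the number of bytes consumed, or <= 0 to stop the loop.  Note that
 * udp_read_sock() frees each skb itself, so the actor must not.
 */
#if 0
static int example_udp_actor(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	desc->count--;		/* budget is managed by the actor */
	return skb->len;	/* consume the whole datagram */
}

static int example_drain(struct sock *sk)
{
	read_descriptor_t desc = { .count = 16 };	/* up to 16 datagrams */

	return udp_read_sock(sk, &desc, example_udp_actor);
}
#endif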
| 1840 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1841 | /* |
| 1842 | * This should be easy: if there is something there we |
| 1843 | * return it; otherwise we block. |
| 1844 | */ |
| 1845 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1846 | int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, |
| 1847 | int flags, int *addr_len) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1848 | { |
| 1849 | struct inet_sock *inet = inet_sk(sk); |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 1850 | DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1851 | struct sk_buff *skb; |
David S. Miller | 59c2cda | 2011-12-01 14:12:55 -0500 | [diff] [blame] | 1852 | unsigned int ulen, copied; |
Paolo Abeni | fd69c39 | 2019-04-08 10:15:59 +0200 | [diff] [blame] | 1853 | int off, err, peeking = flags & MSG_PEEK; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1854 | int is_udplite = IS_UDPLITE(sk); |
Eric Dumazet | 197c949 | 2015-12-30 08:51:12 -0500 | [diff] [blame] | 1855 | bool checksum_valid = false; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1856 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1857 | if (flags & MSG_ERRQUEUE) |
Hannes Frederic Sowa | 85fbaa7 | 2013-11-23 00:46:12 +0100 | [diff] [blame] | 1858 | return ip_recv_error(sk, msg, len, addr_len); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1859 | |
| 1860 | try_again: |
Matthew Dawson | a0917e0 | 2017-08-18 15:04:54 -0400 | [diff] [blame] | 1861 | off = sk_peek_offset(sk, flags); |
Paolo Abeni | fd69c39 | 2019-04-08 10:15:59 +0200 | [diff] [blame] | 1862 | skb = __skb_recv_udp(sk, flags, noblock, &off, &err); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1863 | if (!skb) |
samanthakumar | 627d2d6 | 2016-04-05 12:41:16 -0400 | [diff] [blame] | 1864 | return err; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1865 | |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1866 | ulen = udp_skb_len(skb); |
David S. Miller | 59c2cda | 2011-12-01 14:12:55 -0500 | [diff] [blame] | 1867 | copied = len; |
samanthakumar | 627d2d6 | 2016-04-05 12:41:16 -0400 | [diff] [blame] | 1868 | if (copied > ulen - off) |
| 1869 | copied = ulen - off; |
David S. Miller | 59c2cda | 2011-12-01 14:12:55 -0500 | [diff] [blame] | 1870 | else if (copied < ulen) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1871 | msg->msg_flags |= MSG_TRUNC; |
| 1872 | |
| 1873 | /* |
| 1874 | * If checksum is needed at all, try to do it while copying the |
| 1875 | * data. If the data is truncated, or if we only want a partial |
| 1876 | * coverage checksum (UDP-Lite), do it before the copy. |
| 1877 | */ |
| 1878 | |
Eric Dumazet | d21dbdf | 2016-11-18 17:18:03 -0800 | [diff] [blame] | 1879 | if (copied < ulen || peeking || |
| 1880 | (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1881 | checksum_valid = udp_skb_csum_unnecessary(skb) || |
| 1882 | !__udp_lib_checksum_complete(skb); |
Eric Dumazet | 197c949 | 2015-12-30 08:51:12 -0500 | [diff] [blame] | 1883 | if (!checksum_valid) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1884 | goto csum_copy_err; |
| 1885 | } |
| 1886 | |
Paolo Abeni | b65ac44 | 2017-06-12 11:23:43 +0200 | [diff] [blame] | 1887 | if (checksum_valid || udp_skb_csum_unnecessary(skb)) { |
| 1888 | if (udp_skb_is_linear(skb)) |
| 1889 | err = copy_linear_skb(skb, copied, off, &msg->msg_iter); |
| 1890 | else |
| 1891 | err = skb_copy_datagram_msg(skb, off, msg, copied); |
| 1892 | } else { |
samanthakumar | 627d2d6 | 2016-04-05 12:41:16 -0400 | [diff] [blame] | 1893 | err = skb_copy_and_csum_datagram_msg(skb, off, msg); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1894 | |
| 1895 | if (err == -EINVAL) |
| 1896 | goto csum_copy_err; |
| 1897 | } |
| 1898 | |
Eric Dumazet | 22911fc | 2012-06-27 00:23:44 +0000 | [diff] [blame] | 1899 | if (unlikely(err)) { |
Paolo Abeni | fd69c39 | 2019-04-08 10:15:59 +0200 | [diff] [blame] | 1900 | if (!peeking) { |
Eric Dumazet | 979402b | 2012-09-05 23:34:44 +0000 | [diff] [blame] | 1901 | atomic_inc(&sk->sk_drops); |
Eric Dumazet | 6aef70a | 2016-04-27 16:44:27 -0700 | [diff] [blame] | 1902 | UDP_INC_STATS(sock_net(sk), |
| 1903 | UDP_MIB_INERRORS, is_udplite); |
Eric Dumazet | 979402b | 2012-09-05 23:34:44 +0000 | [diff] [blame] | 1904 | } |
Paolo Abeni | 850cbad | 2016-10-21 13:55:47 +0200 | [diff] [blame] | 1905 | kfree_skb(skb); |
samanthakumar | 627d2d6 | 2016-04-05 12:41:16 -0400 | [diff] [blame] | 1906 | return err; |
Eric Dumazet | 22911fc | 2012-06-27 00:23:44 +0000 | [diff] [blame] | 1907 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1908 | |
Paolo Abeni | fd69c39 | 2019-04-08 10:15:59 +0200 | [diff] [blame] | 1909 | if (!peeking) |
Eric Dumazet | 6aef70a | 2016-04-27 16:44:27 -0700 | [diff] [blame] | 1910 | UDP_INC_STATS(sock_net(sk), |
| 1911 | UDP_MIB_INDATAGRAMS, is_udplite); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1912 | |
Neil Horman | 3b88578 | 2009-10-12 13:26:31 -0700 | [diff] [blame] | 1913 | sock_recv_ts_and_drops(msg, sk, skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1914 | |
| 1915 | /* Copy the address. */ |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 1916 | if (sin) { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1917 | sin->sin_family = AF_INET; |
| 1918 | sin->sin_port = udp_hdr(skb)->source; |
| 1919 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
| 1920 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
Hannes Frederic Sowa | bceaa90 | 2013-11-18 04:20:45 +0100 | [diff] [blame] | 1921 | *addr_len = sizeof(*sin); |
Daniel Borkmann | 983695f | 2019-06-07 01:48:57 +0200 | [diff] [blame] | 1922 | |
Stanislav Fomichev | a9ed15da | 2021-01-15 08:35:01 -0800 | [diff] [blame] | 1923 | BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, |
| 1924 | (struct sockaddr *)sin); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1925 | } |
Paolo Abeni | bcd1665 | 2018-11-07 12:38:30 +0100 | [diff] [blame] | 1926 | |
| 1927 | if (udp_sk(sk)->gro_enabled) |
| 1928 | udp_cmsg_recv(msg, sk, skb); |
| 1929 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1930 | if (inet->cmsg_flags) |
Paolo Abeni | ad95903 | 2016-11-04 11:28:58 +0100 | [diff] [blame] | 1931 | ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1932 | |
David S. Miller | 59c2cda | 2011-12-01 14:12:55 -0500 | [diff] [blame] | 1933 | err = copied; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1934 | if (flags & MSG_TRUNC) |
| 1935 | err = ulen; |
| 1936 | |
Paolo Abeni | 850cbad | 2016-10-21 13:55:47 +0200 | [diff] [blame] | 1937 | skb_consume_udp(sk, skb, peeking ? -err : err); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1938 | return err; |
| 1939 | |
| 1940 | csum_copy_err: |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 1941 | if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, |
| 1942 | udp_skb_destructor)) { |
Eric Dumazet | 6aef70a | 2016-04-27 16:44:27 -0700 | [diff] [blame] | 1943 | UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); |
| 1944 | UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
Eric Dumazet | 6a5dc9e | 2013-04-29 08:39:56 +0000 | [diff] [blame] | 1945 | } |
Paolo Abeni | 850cbad | 2016-10-21 13:55:47 +0200 | [diff] [blame] | 1946 | kfree_skb(skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1947 | |
Eric Dumazet | beb39db | 2015-05-30 09:16:53 -0700 | [diff] [blame] | 1948 | /* starting over for a new packet, but check if we need to yield */ |
| 1949 | cond_resched(); |
Xufeng Zhang | 9cfaa8d | 2011-06-21 10:43:40 +0000 | [diff] [blame] | 1950 | msg->msg_flags &= ~MSG_TRUNC; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1951 | goto try_again; |
| 1952 | } |
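/*
 * User-space view of the MSG_TRUNC handling above (illustrative sketch,
 * per udp(7)): passing MSG_TRUNC makes recv() report the real datagram
 * length even when the buffer was smaller, matching the "err = ulen"
 * branch in udp_recvmsg().
 *
 *	char buf[16];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	if (n > (ssize_t)sizeof(buf))
 *		;	// datagram was truncated; n is its full length
 */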
| 1953 | |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 1954 | int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
| 1955 | { |
| 1956 | /* This check is replicated from __ip4_datagram_connect() and |
| 1957 | * intended to prevent BPF program called below from accessing bytes |
| 1958 | * that are out of the bound specified by user in addr_len. |
| 1959 | */ |
| 1960 | if (addr_len < sizeof(struct sockaddr_in)) |
| 1961 | return -EINVAL; |
| 1962 | |
| 1963 | return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr); |
| 1964 | } |
| 1965 | EXPORT_SYMBOL(udp_pre_connect); |
| 1966 | |
Eric Dumazet | 286c72d | 2016-10-20 09:39:40 -0700 | [diff] [blame] | 1967 | int __udp_disconnect(struct sock *sk, int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1968 | { |
| 1969 | struct inet_sock *inet = inet_sk(sk); |
| 1970 | /* |
| 1971 | * 1003.1g - break association. |
| 1972 | */ |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1973 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1974 | sk->sk_state = TCP_CLOSE; |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 1975 | inet->inet_daddr = 0; |
| 1976 | inet->inet_dport = 0; |
Tom Herbert | bdeab99 | 2011-08-14 19:45:55 +0000 | [diff] [blame] | 1977 | sock_rps_reset_rxhash(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1978 | sk->sk_bound_dev_if = 0; |
Willem de Bruijn | 303d040 | 2020-02-19 14:16:32 -0500 | [diff] [blame] | 1979 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1980 | inet_reset_saddr(sk); |
Willem de Bruijn | 303d040 | 2020-02-19 14:16:32 -0500 | [diff] [blame] | 1981 | if (sk->sk_prot->rehash && |
| 1982 | (sk->sk_userlocks & SOCK_BINDPORT_LOCK)) |
| 1983 | sk->sk_prot->rehash(sk); |
| 1984 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1985 | |
| 1986 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { |
| 1987 | sk->sk_prot->unhash(sk); |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 1988 | inet->inet_sport = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 | } |
| 1990 | sk_dst_reset(sk); |
| 1991 | return 0; |
| 1992 | } |
Eric Dumazet | 286c72d | 2016-10-20 09:39:40 -0700 | [diff] [blame] | 1993 | EXPORT_SYMBOL(__udp_disconnect); |
| 1994 | |
| 1995 | int udp_disconnect(struct sock *sk, int flags) |
| 1996 | { |
| 1997 | lock_sock(sk); |
| 1998 | __udp_disconnect(sk, flags); |
| 1999 | release_sock(sk); |
| 2000 | return 0; |
| 2001 | } |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2002 | EXPORT_SYMBOL(udp_disconnect); |
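/*
 * User-space sketch (illustrative): __udp_disconnect() is what
 * ultimately runs when a connected datagram socket is re-connected
 * with an AF_UNSPEC address, dissolving the association as described
 * in connect(2).
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	if (connect(fd, &sa, sizeof(sa)) < 0)	// fd: connected UDP socket
 *		perror("disconnect");
 */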
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2003 | |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2004 | void udp_lib_unhash(struct sock *sk) |
| 2005 | { |
Eric Dumazet | 723b461 | 2008-11-25 13:55:15 -0800 | [diff] [blame] | 2006 | if (sk_hashed(sk)) { |
| 2007 | struct udp_table *udptable = sk->sk_prot->h.udp_table; |
Eric Dumazet | 512615b | 2009-11-08 10:17:58 +0000 | [diff] [blame] | 2008 | struct udp_hslot *hslot, *hslot2; |
| 2009 | |
| 2010 | hslot = udp_hashslot(udptable, sock_net(sk), |
| 2011 | udp_sk(sk)->udp_port_hash); |
| 2012 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2013 | |
Eric Dumazet | 723b461 | 2008-11-25 13:55:15 -0800 | [diff] [blame] | 2014 | spin_lock_bh(&hslot->lock); |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2015 | if (rcu_access_pointer(sk->sk_reuseport_cb)) |
| 2016 | reuseport_detach_sock(sk); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2017 | if (sk_del_node_init_rcu(sk)) { |
Eric Dumazet | fdcc8aa9 | 2009-11-08 10:17:05 +0000 | [diff] [blame] | 2018 | hslot->count--; |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 2019 | inet_sk(sk)->inet_num = 0; |
Eric Dumazet | 723b461 | 2008-11-25 13:55:15 -0800 | [diff] [blame] | 2020 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
Eric Dumazet | 512615b | 2009-11-08 10:17:58 +0000 | [diff] [blame] | 2021 | |
| 2022 | spin_lock(&hslot2->lock); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2023 | hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); |
Eric Dumazet | 512615b | 2009-11-08 10:17:58 +0000 | [diff] [blame] | 2024 | hslot2->count--; |
| 2025 | spin_unlock(&hslot2->lock); |
Eric Dumazet | 723b461 | 2008-11-25 13:55:15 -0800 | [diff] [blame] | 2026 | } |
| 2027 | spin_unlock_bh(&hslot->lock); |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2028 | } |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2029 | } |
| 2030 | EXPORT_SYMBOL(udp_lib_unhash); |
| 2031 | |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2032 | /* |
| 2033 | * inet_rcv_saddr was changed; we must rehash the secondary hash |
| 2034 | */ |
| 2035 | void udp_lib_rehash(struct sock *sk, u16 newhash) |
| 2036 | { |
| 2037 | if (sk_hashed(sk)) { |
| 2038 | struct udp_table *udptable = sk->sk_prot->h.udp_table; |
| 2039 | struct udp_hslot *hslot, *hslot2, *nhslot2; |
| 2040 | |
| 2041 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); |
| 2042 | nhslot2 = udp_hashslot2(udptable, newhash); |
| 2043 | udp_sk(sk)->udp_portaddr_hash = newhash; |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2044 | |
| 2045 | if (hslot2 != nhslot2 || |
| 2046 | rcu_access_pointer(sk->sk_reuseport_cb)) { |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2047 | hslot = udp_hashslot(udptable, sock_net(sk), |
| 2048 | udp_sk(sk)->udp_port_hash); |
| 2049 | /* we must lock primary chain too */ |
| 2050 | spin_lock_bh(&hslot->lock); |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2051 | if (rcu_access_pointer(sk->sk_reuseport_cb)) |
| 2052 | reuseport_detach_sock(sk); |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2053 | |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2054 | if (hslot2 != nhslot2) { |
| 2055 | spin_lock(&hslot2->lock); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2056 | hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2057 | hslot2->count--; |
| 2058 | spin_unlock(&hslot2->lock); |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2059 | |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2060 | spin_lock(&nhslot2->lock); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2061 | hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, |
Craig Gallek | e32ea7e | 2016-01-04 17:41:46 -0500 | [diff] [blame] | 2062 | &nhslot2->head); |
| 2063 | nhslot2->count++; |
| 2064 | spin_unlock(&nhslot2->lock); |
| 2065 | } |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2066 | |
| 2067 | spin_unlock_bh(&hslot->lock); |
| 2068 | } |
| 2069 | } |
| 2070 | } |
| 2071 | EXPORT_SYMBOL(udp_lib_rehash); |
| 2072 | |
Alexey Kodanev | 8f6b539 | 2019-01-16 19:17:44 +0300 | [diff] [blame] | 2073 | void udp_v4_rehash(struct sock *sk) |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2074 | { |
Martin KaFai Lau | f0b1e64 | 2017-12-01 12:52:30 -0800 | [diff] [blame] | 2075 | u16 new_hash = ipv4_portaddr_hash(sock_net(sk), |
Eric Dumazet | 719f835 | 2010-09-08 05:08:44 +0000 | [diff] [blame] | 2076 | inet_sk(sk)->inet_rcv_saddr, |
| 2077 | inet_sk(sk)->inet_num); |
| 2078 | udp_lib_rehash(sk, new_hash); |
| 2079 | } |
| 2080 | |
Paolo Abeni | a3f96c4 | 2017-05-17 14:52:16 +0200 | [diff] [blame] | 2081 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
Herbert Xu | 9382177 | 2008-09-15 11:48:46 -0700 | [diff] [blame] | 2082 | { |
Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2083 | int rc; |
Herbert Xu | 9382177 | 2008-09-15 11:48:46 -0700 | [diff] [blame] | 2084 | |
Shawn Bohrer | 005ec97 | 2013-10-07 11:01:38 -0500 | [diff] [blame] | 2085 | if (inet_sk(sk)->inet_daddr) { |
Tom Herbert | bdeab99 | 2011-08-14 19:45:55 +0000 | [diff] [blame] | 2086 | sock_rps_save_rxhash(sk, skb); |
Shawn Bohrer | 005ec97 | 2013-10-07 11:01:38 -0500 | [diff] [blame] | 2087 | sk_mark_napi_id(sk, skb); |
Eric Dumazet | 2c8c56e | 2014-11-11 05:54:28 -0800 | [diff] [blame] | 2088 | sk_incoming_cpu_update(sk); |
Eric Dumazet | e68b6e5 | 2016-11-16 09:10:42 -0800 | [diff] [blame] | 2089 | } else { |
| 2090 | sk_mark_napi_id_once(sk, skb); |
Shawn Bohrer | 005ec97 | 2013-10-07 11:01:38 -0500 | [diff] [blame] | 2091 | } |
Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2092 | |
Paolo Abeni | 850cbad | 2016-10-21 13:55:47 +0200 | [diff] [blame] | 2093 | rc = __udp_enqueue_schedule_skb(sk, skb); |
Eric Dumazet | 766e9037 | 2009-10-14 20:40:11 -0700 | [diff] [blame] | 2094 | if (rc < 0) { |
| 2095 | int is_udplite = IS_UDPLITE(sk); |
| 2096 | |
Herbert Xu | 9382177 | 2008-09-15 11:48:46 -0700 | [diff] [blame] | 2097 | /* Note that an ENOMEM error is charged twice */ |
Eric Dumazet | 766e9037 | 2009-10-14 20:40:11 -0700 | [diff] [blame] | 2098 | if (rc == -ENOMEM) |
Eric Dumazet | e61da9e | 2016-04-29 14:16:50 -0700 | [diff] [blame] | 2099 | UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2100 | is_udplite); |
Menglong Dong | a3ce2b10 | 2020-11-05 20:49:14 -0500 | [diff] [blame] | 2101 | else |
| 2102 | UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS, |
| 2103 | is_udplite); |
Eric Dumazet | e61da9e | 2016-04-29 14:16:50 -0700 | [diff] [blame] | 2104 | UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
Eric Dumazet | 766e9037 | 2009-10-14 20:40:11 -0700 | [diff] [blame] | 2105 | kfree_skb(skb); |
Satoru Moriya | 296f7ea | 2011-06-17 11:58:39 +0000 | [diff] [blame] | 2106 | trace_udp_fail_queue_rcv_skb(rc, sk); |
Eric Dumazet | 766e9037 | 2009-10-14 20:40:11 -0700 | [diff] [blame] | 2107 | return -1; |
Herbert Xu | 9382177 | 2008-09-15 11:48:46 -0700 | [diff] [blame] | 2108 | } |
| 2109 | |
| 2110 | return 0; |
Herbert Xu | 9382177 | 2008-09-15 11:48:46 -0700 | [diff] [blame] | 2111 | } |
| 2112 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2113 | /* returns: |
| 2114 | * -1: error |
| 2115 | * 0: success |
| 2116 | * >0: "udp encap" protocol resubmission |
| 2117 | * |
| 2118 | * Note that in the success and error cases, the skb is assumed to |
| 2119 | * have either been requeued or freed. |
| 2120 | */ |
Paolo Abeni | cf329aa | 2018-11-07 12:38:33 +0100 | [diff] [blame] | 2121 | static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2122 | { |
| 2123 | struct udp_sock *up = udp_sk(sk); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2124 | int is_udplite = IS_UDPLITE(sk); |
| 2125 | |
| 2126 | /* |
| 2127 | * Charge it to the socket, dropping if the queue is full. |
| 2128 | */ |
| 2129 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) |
| 2130 | goto drop; |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 2131 | nf_reset_ct(skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2132 | |
Davidlohr Bueso | 88ab310 | 2018-05-08 09:07:03 -0700 | [diff] [blame] | 2133 | if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) { |
Eric Dumazet | 0ad92ad | 2011-11-01 12:56:59 +0000 | [diff] [blame] | 2134 | int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); |
| 2135 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2136 | /* |
| 2137 | * This is an encapsulation socket so pass the skb to |
| 2138 | * the socket's udp_encap_rcv() hook. Otherwise, just |
| 2139 | * fall through and pass this up the UDP socket. |
| 2140 | * up->encap_rcv() returns the following value: |
| 2141 | * =0 if skb was successfully passed to the encap |
| 2142 | * handler or was discarded by it. |
| 2143 | * >0 if skb should be passed on to UDP. |
| 2144 | * <0 if skb should be resubmitted as proto -N |
| 2145 | */ |
| 2146 | |
| 2147 | /* if we're overly short, let UDP handle it */ |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 2148 | encap_rcv = READ_ONCE(up->encap_rcv); |
Hannes Frederic Sowa | e5aed00 | 2016-05-19 15:58:33 +0200 | [diff] [blame] | 2149 | if (encap_rcv) { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2150 | int ret; |
| 2151 | |
Tom Herbert | 0a80966 | 2014-05-07 16:52:39 -0700 | [diff] [blame] | 2152 | /* Verify checksum before giving to encap */ |
| 2153 | if (udp_lib_checksum_complete(skb)) |
| 2154 | goto csum_error; |
| 2155 | |
Eric Dumazet | 0ad92ad | 2011-11-01 12:56:59 +0000 | [diff] [blame] | 2156 | ret = encap_rcv(sk, skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2157 | if (ret <= 0) { |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2158 | __UDP_INC_STATS(sock_net(sk), |
| 2159 | UDP_MIB_INDATAGRAMS, |
| 2160 | is_udplite); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2161 | return -ret; |
| 2162 | } |
| 2163 | } |
| 2164 | |
| 2165 | /* FALLTHROUGH -- it's a UDP packet */ |
| 2166 | } |
| 2167 | |
| 2168 | /* |
| 2169 | * UDP-Lite specific tests, ignored on UDP sockets |
| 2170 | */ |
Miaohe Lin | b0a4227 | 2020-07-21 17:11:44 +0800 | [diff] [blame] | 2171 | if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2172 | |
| 2173 | /* |
| 2174 | * MIB statistics other than incrementing the error count are |
| 2175 | * disabled for the following two types of errors: these depend |
| 2176 | * on the application settings, not on the functioning of the |
| 2177 | * protocol stack as such. |
| 2178 | * |
| 2179 | * RFC 3828 here recommends (sec 3.3): "There should also be a |
| 2180 | * way ... to ... at least let the receiving application block |
| 2181 | * delivery of packets with coverage values less than a value |
| 2182 | * provided by the application." |
| 2183 | */ |
| 2184 | if (up->pcrlen == 0) { /* full coverage was set */ |
Joe Perches | ba7a46f | 2014-11-11 10:59:17 -0800 | [diff] [blame] | 2185 | net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", |
| 2186 | UDP_SKB_CB(skb)->cscov, skb->len); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2187 | goto drop; |
| 2188 | } |
| 2189 | /* The next case involves violating the min. coverage requested |
| 2190 | * by the receiver. This is subtle: if the receiver wants x and x is |
| 2191 | * greater than the buffer size/MTU, then the receiver will complain |
| 2192 | * that it wants x while the sender emits packets of smaller size y. |
| 2193 | * Therefore the above ...()->partial_cov statement is essential. |
| 2194 | */ |
| 2195 | if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { |
Joe Perches | ba7a46f | 2014-11-11 10:59:17 -0800 | [diff] [blame] | 2196 | net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", |
| 2197 | UDP_SKB_CB(skb)->cscov, up->pcrlen); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2198 | goto drop; |
| 2199 | } |
| 2200 | } |
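		/*
		 * User-space sketch (illustrative): the pcrlen value tested
		 * above is set via the udplite(7) socket options, e.g.
		 *
		 *	int cov = 20;	// require at least 20 covered bytes
		 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
		 *		   &cov, sizeof(cov));
		 */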
| 2201 | |
Paolo Abeni | dd99e42 | 2017-06-21 10:24:40 +0200 | [diff] [blame] | 2202 | prefetch(&sk->sk_rmem_alloc); |
Eric Dumazet | ce25d66 | 2016-06-02 14:52:43 -0700 | [diff] [blame] | 2203 | if (rcu_access_pointer(sk->sk_filter) && |
| 2204 | udp_lib_checksum_complete(skb)) |
samanthakumar | e6afc8a | 2016-04-05 12:41:15 -0400 | [diff] [blame] | 2205 | goto csum_error; |
Eric Dumazet | ce25d66 | 2016-06-02 14:52:43 -0700 | [diff] [blame] | 2206 | |
Daniel Borkmann | ba66bbe | 2016-07-25 18:06:12 +0200 | [diff] [blame] | 2207 | if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) |
Michal Kubeček | a612769 | 2016-07-08 17:52:33 +0200 | [diff] [blame] | 2208 | goto drop; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2209 | |
samanthakumar | e6afc8a | 2016-04-05 12:41:15 -0400 | [diff] [blame] | 2210 | udp_csum_pull_header(skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2211 | |
Shawn Bohrer | fbf8866 | 2013-10-07 11:01:40 -0500 | [diff] [blame] | 2212 | ipv4_pktinfo_prepare(sk, skb); |
Paolo Abeni | 850cbad | 2016-10-21 13:55:47 +0200 | [diff] [blame] | 2213 | return __udp_queue_rcv_skb(sk, skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2214 | |
Eric Dumazet | 6a5dc9e | 2013-04-29 08:39:56 +0000 | [diff] [blame] | 2215 | csum_error: |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2216 | __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2217 | drop: |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2218 | __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
Eric Dumazet | 8edf19c | 2009-10-15 00:12:40 +0000 | [diff] [blame] | 2219 | atomic_inc(&sk->sk_drops); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2220 | kfree_skb(skb); |
| 2221 | return -1; |
| 2222 | } |
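/*
 * Illustrative sketch (not built): the shape of an up->encap_rcv()
 * handler honouring the contract documented in udp_queue_rcv_one_skb().
 * The "example_" name and the 8-byte tunnel header are hypothetical.
 */
#if 0
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* too short for our tunnel header: let plain UDP handle it */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + 8))
		return 1;

	/* ... decapsulate and hand off the inner packet here ... */

	consume_skb(skb);
	return 0;	/* consumed; caller bumps UDP_MIB_INDATAGRAMS */
}
#endif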
| 2223 | |
Paolo Abeni | cf329aa | 2018-11-07 12:38:33 +0100 | [diff] [blame] | 2224 | static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
| 2225 | { |
| 2226 | struct sk_buff *next, *segs; |
| 2227 | int ret; |
| 2228 | |
| 2229 | if (likely(!udp_unexpected_gso(sk, skb))) |
| 2230 | return udp_queue_rcv_one_skb(sk, skb); |
| 2231 | |
Cambda Zhu | a08e7fd | 2020-03-26 15:33:14 +0800 | [diff] [blame] | 2232 | BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET); |
Paolo Abeni | cf329aa | 2018-11-07 12:38:33 +0100 | [diff] [blame] | 2233 | __skb_push(skb, -skb_mac_offset(skb)); |
| 2234 | segs = udp_rcv_segment(sk, skb, true); |
Jason A. Donenfeld | 1a186c1 | 2020-01-13 18:42:27 -0500 | [diff] [blame] | 2235 | skb_list_walk_safe(segs, skb, next) { |
Paolo Abeni | cf329aa | 2018-11-07 12:38:33 +0100 | [diff] [blame] | 2236 | __skb_pull(skb, skb_transport_offset(skb)); |
Paolo Abeni | 000ac44 | 2021-03-30 12:28:49 +0200 | [diff] [blame] | 2237 | |
| 2238 | udp_post_segment_fix_csum(skb); |
Paolo Abeni | cf329aa | 2018-11-07 12:38:33 +0100 | [diff] [blame] | 2239 | ret = udp_queue_rcv_one_skb(sk, skb); |
| 2240 | if (ret > 0) |
Xin Long | 10c678b | 2020-12-07 15:55:40 +0800 | [diff] [blame] | 2241 | ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); |
Paolo Abeni | cf329aa | 2018-11-07 12:38:33 +0100 | [diff] [blame] | 2242 | } |
| 2243 | return 0; |
| 2244 | } |
| 2245 | |
Eric Dumazet | 9750223 | 2013-12-11 14:46:51 -0800 | [diff] [blame] | 2246 | /* For TCP sockets, sk_rx_dst is protected by the socket lock. |
Eric Dumazet | e47eb5d | 2013-12-15 10:53:46 -0800 | [diff] [blame] | 2247 | * For UDP, we use xchg() to guard against concurrent changes. |
Eric Dumazet | 9750223 | 2013-12-11 14:46:51 -0800 | [diff] [blame] | 2248 | */ |
Paolo Abeni | 64f0f5d | 2017-08-25 14:31:01 +0200 | [diff] [blame] | 2249 | bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2250 | { |
Eric Dumazet | 9750223 | 2013-12-11 14:46:51 -0800 | [diff] [blame] | 2251 | struct dst_entry *old; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2252 | |
Wei Wang | d24406c | 2017-06-17 10:42:25 -0700 | [diff] [blame] | 2253 | if (dst_hold_safe(dst)) { |
Eric Dumazet | 8f905c0 | 2021-12-20 06:33:30 -0800 | [diff] [blame] | 2254 | old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); |
Wei Wang | d24406c | 2017-06-17 10:42:25 -0700 | [diff] [blame] | 2255 | dst_release(old); |
Paolo Abeni | 64f0f5d | 2017-08-25 14:31:01 +0200 | [diff] [blame] | 2256 | return old != dst; |
Wei Wang | d24406c | 2017-06-17 10:42:25 -0700 | [diff] [blame] | 2257 | } |
Paolo Abeni | 64f0f5d | 2017-08-25 14:31:01 +0200 | [diff] [blame] | 2258 | return false; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2259 | } |
Paolo Abeni | c9f2c1a | 2017-07-27 14:45:09 +0200 | [diff] [blame] | 2260 | EXPORT_SYMBOL(udp_sk_rx_dst_set); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2261 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2262 | /* |
| 2263 | * Multicasts and broadcasts go to each listener. |
| 2264 | * |
Eric Dumazet | 1240d13 | 2009-11-08 10:18:44 +0000 | [diff] [blame] | 2265 | * Note: called only from the BH handler context. |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2266 | */ |
Pavel Emelyanov | e316349 | 2008-06-16 17:12:11 -0700 | [diff] [blame] | 2267 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2268 | struct udphdr *uh, |
| 2269 | __be32 saddr, __be32 daddr, |
Rick Jones | 36cbb24 | 2014-11-06 10:37:54 -0800 | [diff] [blame] | 2270 | struct udp_table *udptable, |
| 2271 | int proto) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2272 | { |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2273 | struct sock *sk, *first = NULL; |
David Held | 5cf3d46 | 2014-07-15 23:28:31 -0400 | [diff] [blame] | 2274 | unsigned short hnum = ntohs(uh->dest); |
| 2275 | struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); |
David Held | 2dc41cf | 2014-07-15 23:28:32 -0400 | [diff] [blame] | 2276 | unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2277 | unsigned int offset = offsetof(typeof(*sk), sk_node); |
| 2278 | int dif = skb->dev->ifindex; |
David Ahern | fb74c27 | 2017-08-07 08:44:16 -0700 | [diff] [blame] | 2279 | int sdif = inet_sdif(skb); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2280 | struct hlist_node *node; |
| 2281 | struct sk_buff *nskb; |
David Held | 2dc41cf | 2014-07-15 23:28:32 -0400 | [diff] [blame] | 2282 | |
| 2283 | if (use_hash2) { |
Martin KaFai Lau | f0b1e64 | 2017-12-01 12:52:30 -0800 | [diff] [blame] | 2284 | hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & |
Pablo Neira | 73e2d5e | 2016-11-14 23:40:30 +0100 | [diff] [blame] | 2285 | udptable->mask; |
Martin KaFai Lau | f0b1e64 | 2017-12-01 12:52:30 -0800 | [diff] [blame] | 2286 | hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask; |
David Held | 2dc41cf | 2014-07-15 23:28:32 -0400 | [diff] [blame] | 2287 | start_lookup: |
Pablo Neira | 73e2d5e | 2016-11-14 23:40:30 +0100 | [diff] [blame] | 2288 | hslot = &udptable->hash2[hash2]; |
David Held | 2dc41cf | 2014-07-15 23:28:32 -0400 | [diff] [blame] | 2289 | offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); |
| 2290 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2291 | |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2292 | sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { |
| 2293 | if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, |
David Ahern | fb74c27 | 2017-08-07 08:44:16 -0700 | [diff] [blame] | 2294 | uh->source, saddr, dif, sdif, hnum)) |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2295 | continue; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2296 | |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2297 | if (!first) { |
| 2298 | first = sk; |
| 2299 | continue; |
| 2300 | } |
| 2301 | nskb = skb_clone(skb, GFP_ATOMIC); |
| 2302 | |
| 2303 | if (unlikely(!nskb)) { |
| 2304 | atomic_inc(&sk->sk_drops); |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2305 | __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS, |
| 2306 | IS_UDPLITE(sk)); |
| 2307 | __UDP_INC_STATS(net, UDP_MIB_INERRORS, |
| 2308 | IS_UDPLITE(sk)); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2309 | continue; |
| 2310 | } |
| 2311 | if (udp_queue_rcv_skb(sk, nskb) > 0) |
| 2312 | consume_skb(nskb); |
| 2313 | } |
Eric Dumazet | 1240d13 | 2009-11-08 10:18:44 +0000 | [diff] [blame] | 2314 | |
David Held | 2dc41cf | 2014-07-15 23:28:32 -0400 | [diff] [blame] | 2315 | /* Also lookup *:port if we are using hash2 and haven't done so yet. */ |
| 2316 | if (use_hash2 && hash2 != hash2_any) { |
| 2317 | hash2 = hash2_any; |
| 2318 | goto start_lookup; |
| 2319 | } |
| 2320 | |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2321 | if (first) { |
| 2322 | if (udp_queue_rcv_skb(first, skb) > 0) |
| 2323 | consume_skb(skb); |
Eric Dumazet | 1240d13 | 2009-11-08 10:18:44 +0000 | [diff] [blame] | 2324 | } else { |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2325 | kfree_skb(skb); |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2326 | __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI, |
| 2327 | proto == IPPROTO_UDPLITE); |
Eric Dumazet | 1240d13 | 2009-11-08 10:18:44 +0000 | [diff] [blame] | 2328 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2329 | return 0; |
| 2330 | } |
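/*
 * User-space sketch (illustrative): the clone-per-listener loop above
 * is why two sockets bound (with SO_REUSEADDR) to the same port and
 * joined to the same group each receive every datagram.
 *
 *	struct ip_mreq mr = {
 *		.imr_multiaddr.s_addr = inet_addr("239.0.0.1"),
 *		.imr_interface.s_addr = htonl(INADDR_ANY),
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */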
| 2331 | |
| 2332 | /* Initialize UDP checksum. If this returns zero (success), |
| 2333 | * CHECKSUM_UNNECESSARY means that no more checks are required. |
Su Yanjun | 666a3d6 | 2019-07-18 10:19:23 +0800 | [diff] [blame] | 2334 | * Otherwise, csum completion requires checksumming the packet body, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2335 | * including the udp header, and folding it into skb->csum. |
| 2336 | */ |
| 2337 | static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, |
| 2338 | int proto) |
| 2339 | { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2340 | int err; |
| 2341 | |
| 2342 | UDP_SKB_CB(skb)->partial_cov = 0; |
| 2343 | UDP_SKB_CB(skb)->cscov = skb->len; |
| 2344 | |
| 2345 | if (proto == IPPROTO_UDPLITE) { |
| 2346 | err = udplite_checksum_init(skb, uh); |
| 2347 | if (err) |
| 2348 | return err; |
Alexey Kodanev | 15f35d4 | 2018-02-15 20:18:43 +0300 | [diff] [blame] | 2349 | |
| 2350 | if (UDP_SKB_CB(skb)->partial_cov) { |
| 2351 | skb->csum = inet_compute_pseudo(skb, proto); |
| 2352 | return 0; |
| 2353 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2354 | } |
| 2355 | |
Hannes Frederic Sowa | b46d9f6 | 2016-06-12 12:02:46 +0200 | [diff] [blame] | 2356 | /* Note, we are only interested in != 0 or == 0, thus the |
| 2357 | * force to int. |
| 2358 | */ |
Sean Tranchetti | db4f1be | 2018-10-23 16:04:31 -0600 | [diff] [blame] | 2359 | err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check, |
| 2360 | inet_compute_pseudo); |
| 2361 | if (err) |
| 2362 | return err; |
| 2363 | |
| 2364 | if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) { |
| 2365 | /* If SW calculated the value, we know it's bad */ |
| 2366 | if (skb->csum_complete_sw) |
| 2367 | return 1; |
| 2368 | |
| 2369 | /* HW says the value is bad. Let's validate that. |
| 2370 | * skb->csum is no longer the full packet checksum, |
| 2371 | * so don't treat it as such. |
| 2372 | */ |
| 2373 | skb_checksum_complete_unset(skb); |
| 2374 | } |
| 2375 | |
| 2376 | return 0; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2377 | } |
| 2378 | |
Paolo Abeni | 2b5a921 | 2018-09-13 16:27:20 +0200 | [diff] [blame] | 2379 | /* wrapper for udp_queue_rcv_skb taking care of csum conversion and |
| 2380 | * return code conversion for ip layer consumption |
| 2381 | */ |
| 2382 | static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, |
| 2383 | struct udphdr *uh) |
| 2384 | { |
| 2385 | int ret; |
| 2386 | |
| 2387 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) |
Li RongQing | e4aa33a | 2019-07-04 17:03:26 +0800 | [diff] [blame] | 2388 | skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo); |
Paolo Abeni | 2b5a921 | 2018-09-13 16:27:20 +0200 | [diff] [blame] | 2389 | |
| 2390 | ret = udp_queue_rcv_skb(sk, skb); |
| 2391 | |
| 2392 | /* a return value > 0 means to resubmit the input; the IP |
| 2393 | * layer wants the return value to be -protocol, or 0 |
| 2394 | */ |
| 2395 | if (ret > 0) |
| 2396 | return -ret; |
| 2397 | return 0; |
| 2398 | } |
| 2399 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2400 | /* |
| 2401 | * All we need to do is get the socket, and then do a checksum. |
| 2402 | */ |
| 2403 | |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2404 | int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2405 | int proto) |
| 2406 | { |
| 2407 | struct sock *sk; |
Jesper Dangaard Brouer | 7b5e56f | 2009-02-05 15:05:45 -0800 | [diff] [blame] | 2408 | struct udphdr *uh; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2409 | unsigned short ulen; |
Eric Dumazet | adf3090 | 2009-06-02 05:19:30 +0000 | [diff] [blame] | 2410 | struct rtable *rt = skb_rtable(skb); |
Jesper Dangaard Brouer | 2783ef2 | 2009-02-06 01:59:12 -0800 | [diff] [blame] | 2411 | __be32 saddr, daddr; |
Pavel Emelyanov | 0283328 | 2008-07-05 21:18:48 -0700 | [diff] [blame] | 2412 | struct net *net = dev_net(skb->dev); |
Joe Stringer | 71489e2 | 2020-03-29 15:53:39 -0700 | [diff] [blame] | 2413 | bool refcounted; |
Menglong Dong | 1c7fab7 | 2022-01-09 14:36:28 +0800 | [diff] [blame] | 2414 | int drop_reason; |
| 2415 | |
| 2416 | drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2417 | |
| 2418 | /* |
| 2419 | * Validate the packet. |
| 2420 | */ |
| 2421 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
| 2422 | goto drop; /* No space for header. */ |
| 2423 | |
Jesper Dangaard Brouer | 7b5e56f | 2009-02-05 15:05:45 -0800 | [diff] [blame] | 2424 | uh = udp_hdr(skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2425 | ulen = ntohs(uh->len); |
Bjørn Mork | ccc2d97 | 2010-05-06 03:44:34 +0000 | [diff] [blame] | 2426 | saddr = ip_hdr(skb)->saddr; |
| 2427 | daddr = ip_hdr(skb)->daddr; |
| 2428 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2429 | if (ulen > skb->len) |
| 2430 | goto short_packet; |
| 2431 | |
| 2432 | if (proto == IPPROTO_UDP) { |
| 2433 | /* UDP validates ulen. */ |
| 2434 | if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) |
| 2435 | goto short_packet; |
| 2436 | uh = udp_hdr(skb); |
| 2437 | } |
| 2438 | |
| 2439 | if (udp4_csum_init(skb, uh, proto)) |
| 2440 | goto csum_error; |
| 2441 | |
Joe Stringer | 71489e2 | 2020-03-29 15:53:39 -0700 | [diff] [blame] | 2442 | sk = skb_steal_sock(skb, &refcounted); |
Eric Dumazet | 8afdd99 | 2013-12-10 18:07:23 -0800 | [diff] [blame] | 2443 | if (sk) { |
Eric Dumazet | 9750223 | 2013-12-11 14:46:51 -0800 | [diff] [blame] | 2444 | struct dst_entry *dst = skb_dst(skb); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2445 | int ret; |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2446 | |
Eric Dumazet | 8f905c0 | 2021-12-20 06:33:30 -0800 | [diff] [blame] | 2447 | if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) |
Eric Dumazet | 9750223 | 2013-12-11 14:46:51 -0800 | [diff] [blame] | 2448 | udp_sk_rx_dst_set(sk, dst); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2449 | |
Paolo Abeni | 2b5a921 | 2018-09-13 16:27:20 +0200 | [diff] [blame] | 2450 | ret = udp_unicast_rcv_skb(sk, skb, uh); |
Joe Stringer | 71489e2 | 2020-03-29 15:53:39 -0700 | [diff] [blame] | 2451 | if (refcounted) |
| 2452 | sock_put(sk); |
Paolo Abeni | 2b5a921 | 2018-09-13 16:27:20 +0200 | [diff] [blame] | 2453 | return ret; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2454 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2455 | |
Fabian Frederick | c18450a | 2014-11-04 20:48:41 +0100 | [diff] [blame] | 2456 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
| 2457 | return __udp4_lib_mcast_deliver(net, skb, uh, |
Rick Jones | 36cbb24 | 2014-11-06 10:37:54 -0800 | [diff] [blame] | 2458 | saddr, daddr, udptable, proto); |
Fabian Frederick | c18450a | 2014-11-04 20:48:41 +0100 | [diff] [blame] | 2459 | |
| 2460 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
Paolo Abeni | 2b5a921 | 2018-09-13 16:27:20 +0200 | [diff] [blame] | 2461 | if (sk) |
| 2462 | return udp_unicast_rcv_skb(sk, skb, uh); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2463 | |
| 2464 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
| 2465 | goto drop; |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 2466 | nf_reset_ct(skb); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2467 | |
| 2468 | /* No socket. Drop packet silently, if checksum is wrong */ |
| 2469 | if (udp_lib_checksum_complete(skb)) |
| 2470 | goto csum_error; |
| 2471 | |
Menglong Dong | 1c7fab7 | 2022-01-09 14:36:28 +0800 | [diff] [blame] | 2472 | drop_reason = SKB_DROP_REASON_NO_SOCKET; |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2473 | __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2474 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
| 2475 | |
| 2476 | /* |
| 2477 | * Hmm. We got a UDP packet to a port to which we |
| 2478 | * don't want to listen. Ignore it. |
| 2479 | */ |
Menglong Dong | 1c7fab7 | 2022-01-09 14:36:28 +0800 | [diff] [blame] | 2480 | kfree_skb_reason(skb, drop_reason); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2481 | return 0; |
| 2482 | |
| 2483 | short_packet: |
Menglong Dong | 1c7fab7 | 2022-01-09 14:36:28 +0800 | [diff] [blame] | 2484 | drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; |
Joe Perches | ba7a46f | 2014-11-11 10:59:17 -0800 | [diff] [blame] | 2485 | net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", |
| 2486 | proto == IPPROTO_UDPLITE ? "Lite" : "", |
| 2487 | &saddr, ntohs(uh->source), |
| 2488 | ulen, skb->len, |
| 2489 | &daddr, ntohs(uh->dest)); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2490 | goto drop; |
| 2491 | |
| 2492 | csum_error: |
| 2493 | /* |
| 2494 | * RFC1122: OK. Discards the bad packet silently (as far as |
| 2495 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). |
| 2496 | */ |
Menglong Dong | 1c7fab7 | 2022-01-09 14:36:28 +0800 | [diff] [blame] | 2497 | drop_reason = SKB_DROP_REASON_UDP_CSUM; |
Joe Perches | ba7a46f | 2014-11-11 10:59:17 -0800 | [diff] [blame] | 2498 | net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", |
| 2499 | proto == IPPROTO_UDPLITE ? "Lite" : "", |
| 2500 | &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), |
| 2501 | ulen); |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2502 | __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2503 | drop: |
Eric Dumazet | 02c2234 | 2016-04-27 16:44:30 -0700 | [diff] [blame] | 2504 | __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); |
Menglong Dong | 1c7fab7 | 2022-01-09 14:36:28 +0800 | [diff] [blame] | 2505 | kfree_skb_reason(skb, drop_reason); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2506 | return 0; |
| 2507 | } |
| 2508 | |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2509 | /* We can only early demux multicast if there is a single matching socket. |
| 2510 | * If more than one socket is found, return NULL. |
| 2511 | */ |
| 2512 | static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, |
| 2513 | __be16 loc_port, __be32 loc_addr, |
| 2514 | __be16 rmt_port, __be32 rmt_addr, |
David Ahern | fb74c27 | 2017-08-07 08:44:16 -0700 | [diff] [blame] | 2515 | int dif, int sdif) |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2516 | { |
| 2517 | struct sock *sk, *result; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2518 | unsigned short hnum = ntohs(loc_port); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2519 | unsigned int slot = udp_hashfn(net, hnum, udp_table.mask); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2520 | struct udp_hslot *hslot = &udp_table.hash[slot]; |
| 2521 | |
Eric Dumazet | 63c6f81 | 2014-06-12 16:13:06 -0700 | [diff] [blame] | 2522 | /* Do not bother scanning too big a list */ |
| 2523 | if (hslot->count > 10) |
| 2524 | return NULL; |
| 2525 | |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2526 | result = NULL; |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2527 | sk_for_each_rcu(sk, &hslot->head) { |
| 2528 | if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, |
David Ahern | fb74c27 | 2017-08-07 08:44:16 -0700 | [diff] [blame] | 2529 | rmt_port, rmt_addr, dif, sdif, hnum)) { |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2530 | if (result) |
| 2531 | return NULL; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2532 | result = sk; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2533 | } |
| 2534 | } |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2535 | |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2536 | return result; |
| 2537 | } |
| 2538 | |
| 2539 | /* For unicast we should only early demux connected sockets or we can |
| 2540 | * break forwarding setups. The chains here can be long, so only check |
| 2541 | * whether the first socket is an exact match and, if not, move on. |
| 2542 | */ |
| 2543 | static struct sock *__udp4_lib_demux_lookup(struct net *net, |
| 2544 | __be16 loc_port, __be32 loc_addr, |
| 2545 | __be16 rmt_port, __be32 rmt_addr, |
David Ahern | 3fa6f61 | 2017-08-07 08:44:17 -0700 | [diff] [blame] | 2546 | int dif, int sdif) |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2547 | { |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2548 | unsigned short hnum = ntohs(loc_port); |
Martin KaFai Lau | f0b1e64 | 2017-12-01 12:52:30 -0800 | [diff] [blame] | 2549 | unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2550 | unsigned int slot2 = hash2 & udp_table.mask; |
| 2551 | struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; |
Joe Perches | c722831 | 2014-05-13 20:30:07 -0700 | [diff] [blame] | 2552 | INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2553 | const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2554 | struct sock *sk; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2555 | |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2556 | udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { |
| 2557 | if (INET_MATCH(sk, net, acookie, rmt_addr, |
David Ahern | 3fa6f61 | 2017-08-07 08:44:17 -0700 | [diff] [blame] | 2558 | loc_addr, ports, dif, sdif)) |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2559 | return sk; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2560 | /* Only check first socket in chain */ |
| 2561 | break; |
| 2562 | } |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2563 | return NULL; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2564 | } |
| 2565 | |
Paolo Abeni | 7487449 | 2017-09-28 15:51:36 +0200 | [diff] [blame] | 2566 | int udp_v4_early_demux(struct sk_buff *skb) |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2567 | { |
Eric Dumazet | 610438b | 2013-12-11 08:10:05 -0800 | [diff] [blame] | 2568 | struct net *net = dev_net(skb->dev); |
Paolo Abeni | bc044e8 | 2017-09-28 15:51:37 +0200 | [diff] [blame] | 2569 | struct in_device *in_dev = NULL; |
Eric Dumazet | 610438b | 2013-12-11 08:10:05 -0800 | [diff] [blame] | 2570 | const struct iphdr *iph; |
| 2571 | const struct udphdr *uh; |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2572 | struct sock *sk = NULL; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2573 | struct dst_entry *dst; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2574 | int dif = skb->dev->ifindex; |
David Ahern | fb74c27 | 2017-08-07 08:44:16 -0700 | [diff] [blame] | 2575 | int sdif = inet_sdif(skb); |
Shawn Bohrer | 6e54030 | 2015-06-03 16:27:38 -0500 | [diff] [blame] | 2576 | int ours; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2577 | |
| 2578 | /* validate the packet */ |
| 2579 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) |
Paolo Abeni | 7487449 | 2017-09-28 15:51:36 +0200 | [diff] [blame] | 2580 | return 0; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2581 | |
Eric Dumazet | 610438b | 2013-12-11 08:10:05 -0800 | [diff] [blame] | 2582 | iph = ip_hdr(skb); |
| 2583 | uh = udp_hdr(skb); |
| 2584 | |
Paolo Abeni | 996b44f | 2017-10-09 14:52:10 +0200 | [diff] [blame] | 2585 | if (skb->pkt_type == PACKET_MULTICAST) { |
Paolo Abeni | bc044e8 | 2017-09-28 15:51:37 +0200 | [diff] [blame] | 2586 | in_dev = __in_dev_get_rcu(skb->dev); |
Shawn Bohrer | 6e54030 | 2015-06-03 16:27:38 -0500 | [diff] [blame] | 2587 | |
| 2588 | if (!in_dev) |
Paolo Abeni | 7487449 | 2017-09-28 15:51:36 +0200 | [diff] [blame] | 2589 | return 0; |
Shawn Bohrer | 6e54030 | 2015-06-03 16:27:38 -0500 | [diff] [blame] | 2590 | |
Paolo Abeni | 996b44f | 2017-10-09 14:52:10 +0200 | [diff] [blame] | 2591 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, |
| 2592 | iph->protocol); |
| 2593 | if (!ours) |
| 2594 | return 0; |
Paolo Abeni | ad0ea19 | 2016-03-22 09:19:38 +0100 | [diff] [blame] | 2595 | |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2596 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, |
David Ahern | fb74c27 | 2017-08-07 08:44:16 -0700 | [diff] [blame] | 2597 | uh->source, iph->saddr, |
| 2598 | dif, sdif); |
Shawn Bohrer | 6e54030 | 2015-06-03 16:27:38 -0500 | [diff] [blame] | 2599 | } else if (skb->pkt_type == PACKET_HOST) { |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2600 | sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, |
David Ahern | 3fa6f61 | 2017-08-07 08:44:17 -0700 | [diff] [blame] | 2601 | uh->source, iph->saddr, dif, sdif); |
Shawn Bohrer | 6e54030 | 2015-06-03 16:27:38 -0500 | [diff] [blame] | 2602 | } |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2603 | |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 2604 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) |
Paolo Abeni | 7487449 | 2017-09-28 15:51:36 +0200 | [diff] [blame] | 2605 | return 0; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2606 | |
| 2607 | skb->sk = sk; |
Alexander Duyck | 82eabd9 | 2014-09-04 13:32:11 -0400 | [diff] [blame] | 2608 | skb->destructor = sock_efree; |
Eric Dumazet | 8f905c0 | 2021-12-20 06:33:30 -0800 | [diff] [blame] | 2609 | dst = rcu_dereference(sk->sk_rx_dst); |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2610 | |
| 2611 | if (dst) |
| 2612 | dst = dst_check(dst, 0); |
Eric Dumazet | 10e2eb8 | 2015-08-01 12:14:33 +0200 | [diff] [blame] | 2613 | if (dst) { |
Paolo Abeni | bc044e8 | 2017-09-28 15:51:37 +0200 | [diff] [blame] | 2614 | u32 itag = 0; |
| 2615 | |
Wei Wang | d24406c | 2017-06-17 10:42:25 -0700 | [diff] [blame] | 2616 | /* set noref for now; |
| 2617 | * any place that wants to hold the dst has to call |
| 2618 | * dst_hold_safe() |
| 2619 | */ |
| 2620 | skb_dst_set_noref(skb, dst); |
Paolo Abeni | bc044e8 | 2017-09-28 15:51:37 +0200 | [diff] [blame] | 2621 | |
| 2622 | /* for unconnected multicast sockets we need to validate |
| 2623 | * the source on each packet |
| 2624 | */ |
| 2625 | if (!inet_sk(sk)->inet_daddr && in_dev) |
| 2626 | return ip_mc_validate_source(skb, iph->daddr, |
Guillaume Nault | 8d2b51b | 2021-01-16 11:44:22 +0100 | [diff] [blame] | 2627 | iph->saddr, |
| 2628 | iph->tos & IPTOS_RT_MASK, |
Paolo Abeni | bc044e8 | 2017-09-28 15:51:37 +0200 | [diff] [blame] | 2629 | skb->dev, in_dev, &itag); |
Eric Dumazet | 10e2eb8 | 2015-08-01 12:14:33 +0200 | [diff] [blame] | 2630 | } |
Paolo Abeni | 7487449 | 2017-09-28 15:51:36 +0200 | [diff] [blame] | 2631 | return 0; |
Shawn Bohrer | 421b388 | 2013-10-07 11:01:39 -0500 | [diff] [blame] | 2632 | } |
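/*
 * Note (illustrative): this fast path can be disabled at run time via
 * the net.ipv4.udp_early_demux sysctl, e.g.
 *
 *	sysctl -w net.ipv4.udp_early_demux=0
 */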
| 2633 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2634 | int udp_rcv(struct sk_buff *skb) |
| 2635 | { |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2636 | return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2637 | } |
| 2638 | |
Brian Haley | 7d06b2e | 2008-06-14 17:04:49 -0700 | [diff] [blame] | 2639 | void udp_destroy_sock(struct sock *sk) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2640 | { |
Tom Parkin | 44046a5 | 2013-03-19 06:11:12 +0000 | [diff] [blame] | 2641 | struct udp_sock *up = udp_sk(sk); |
Eric Dumazet | 8a74ad6 | 2010-05-26 19:20:18 +0000 | [diff] [blame] | 2642 | bool slow = lock_sock_fast(sk); |
Paolo Abeni | a8b897c | 2021-06-09 11:49:01 +0200 | [diff] [blame] | 2643 | |
| 2644 | /* protects from races with udp_abort() */ |
| 2645 | sock_set_flag(sk, SOCK_DEAD); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2646 | udp_flush_pending_frames(sk); |
Eric Dumazet | 8a74ad6 | 2010-05-26 19:20:18 +0000 | [diff] [blame] | 2647 | unlock_sock_fast(sk, slow); |
Paolo Abeni | 60fb956 | 2018-11-07 12:38:28 +0100 | [diff] [blame] | 2648 | if (static_branch_unlikely(&udp_encap_needed_key)) { |
| 2649 | if (up->encap_type) { |
| 2650 | void (*encap_destroy)(struct sock *sk); |
| 2651 | encap_destroy = READ_ONCE(up->encap_destroy); |
| 2652 | if (encap_destroy) |
| 2653 | encap_destroy(sk); |
| 2654 | } |
| 2655 | if (up->encap_enabled) |
Paolo Abeni | 9c48060 | 2018-11-15 02:34:50 +0100 | [diff] [blame] | 2656 | static_branch_dec(&udp_encap_needed_key); |
Tom Parkin | 44046a5 | 2013-03-19 06:11:12 +0000 | [diff] [blame] | 2657 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2658 | } |
| 2659 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2660 | /* |
| 2661 | * Socket option code for UDP |
| 2662 | */ |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 2663 | int udp_lib_setsockopt(struct sock *sk, int level, int optname, |
Christoph Hellwig | 91ac1cc | 2020-07-23 08:09:04 +0200 | [diff] [blame] | 2664 | sockptr_t optval, unsigned int optlen, |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 2665 | int (*push_pending_frames)(struct sock *)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2666 | { |
| 2667 | struct udp_sock *up = udp_sk(sk); |
Tom Herbert | 1c19448 | 2014-05-23 08:47:32 -0700 | [diff] [blame] | 2668 | int val, valbool; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2669 | int err = 0; |
Wang Chen | b2bf1e2 | 2007-12-03 22:34:16 +1100 | [diff] [blame] | 2670 | int is_udplite = IS_UDPLITE(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2671 | |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2672 | if (optlen < sizeof(int)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2673 | return -EINVAL; |
| 2674 | |
Christoph Hellwig | 91ac1cc | 2020-07-23 08:09:04 +0200 | [diff] [blame] | 2675 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2676 | return -EFAULT; |
| 2677 | |
Tom Herbert | 1c19448 | 2014-05-23 08:47:32 -0700 | [diff] [blame] | 2678 | valbool = val ? 1 : 0; |
| 2679 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 2680 | switch (optname) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2681 | case UDP_CORK: |
| 2682 | if (val != 0) { |
Eric Dumazet | a9f5970 | 2021-09-27 17:29:24 -0700 | [diff] [blame] | 2683 | WRITE_ONCE(up->corkflag, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2684 | } else { |
Eric Dumazet | a9f5970 | 2021-09-27 17:29:24 -0700 | [diff] [blame] | 2685 | WRITE_ONCE(up->corkflag, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2686 | lock_sock(sk); |
Joe Perches | 4243cdc | 2014-11-11 21:59:20 -0800 | [diff] [blame] | 2687 | push_pending_frames(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2688 | release_sock(sk); |
| 2689 | } |
| 2690 | break; |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2691 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2692 | case UDP_ENCAP: |
| 2693 | switch (val) { |
| 2694 | case 0: |
Alexey Dobriyan | fd1ac07 | 2019-10-04 00:21:57 +0300 | [diff] [blame] | 2695 | #ifdef CONFIG_XFRM |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2696 | case UDP_ENCAP_ESPINUDP: |
| 2697 | case UDP_ENCAP_ESPINUDP_NON_IKE: |
Sabrina Dubroca | 0146dca | 2020-04-27 17:59:34 +0200 | [diff] [blame] | 2698 | #if IS_ENABLED(CONFIG_IPV6) |
| 2699 | if (sk->sk_family == AF_INET6) |
| 2700 | up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv; |
| 2701 | else |
| 2702 | #endif |
| 2703 | up->encap_rcv = xfrm4_udp_encap_rcv; |
Alexey Dobriyan | fd1ac07 | 2019-10-04 00:21:57 +0300 | [diff] [blame] | 2704 | #endif |
Joe Perches | a8eceea | 2020-03-12 15:50:22 -0700 | [diff] [blame] | 2705 | fallthrough; |
James Chapman | 342f023 | 2007-06-27 15:37:46 -0700 | [diff] [blame] | 2706 | case UDP_ENCAP_L2TPINUDP: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2707 | up->encap_type = val; |
Paolo Abeni | 60fb956 | 2018-11-07 12:38:28 +0100 | [diff] [blame] | 2708 | lock_sock(sk); |
| 2709 | udp_tunnel_encap_enable(sk->sk_socket); |
| 2710 | release_sock(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2711 | break; |
| 2712 | default: |
| 2713 | err = -ENOPROTOOPT; |
| 2714 | break; |
| 2715 | } |
| 2716 | break; |
| 2717 | |
Tom Herbert | 1c19448 | 2014-05-23 08:47:32 -0700 | [diff] [blame] | 2718 | case UDP_NO_CHECK6_TX: |
| 2719 | up->no_check6_tx = valbool; |
| 2720 | break; |
| 2721 | |
| 2722 | case UDP_NO_CHECK6_RX: |
| 2723 | up->no_check6_rx = valbool; |
| 2724 | break; |
| 2725 | |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 2726 | case UDP_SEGMENT: |
| 2727 | if (val < 0 || val > USHRT_MAX) |
| 2728 | return -EINVAL; |
Eric Dumazet | 18a419b | 2021-06-30 09:42:44 -0700 | [diff] [blame] | 2729 | WRITE_ONCE(up->gso_size, val); |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 2730 | break; |
| 2731 | |
Paolo Abeni | e20cf8d | 2018-11-07 12:38:29 +0100 | [diff] [blame] | 2732 | case UDP_GRO: |
| 2733 | lock_sock(sk); |
Paolo Abeni | 78352f7 | 2021-03-30 12:28:52 +0200 | [diff] [blame] | 2734 | |
| 2735 | /* when enabling GRO, accept the related GSO packet type */ |
Paolo Abeni | e20cf8d | 2018-11-07 12:38:29 +0100 | [diff] [blame] | 2736 | if (valbool) |
| 2737 | udp_tunnel_encap_enable(sk->sk_socket); |
| 2738 | up->gro_enabled = valbool; |
Paolo Abeni | 78352f7 | 2021-03-30 12:28:52 +0200 | [diff] [blame] | 2739 | up->accept_udp_l4 = valbool; |
Paolo Abeni | e20cf8d | 2018-11-07 12:38:29 +0100 | [diff] [blame] | 2740 | release_sock(sk); |
| 2741 | break; |
| 2742 | |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2743 | /* |
| 2744 | * UDP-Lite's partial checksum coverage (RFC 3828). |
| 2745 | */ |
| 2746 | /* The sender sets the actual checksum coverage length via this option. |
| 2747 | * The case coverage > packet length is handled by the send module. */ |
| 2748 | case UDPLITE_SEND_CSCOV: |
Wang Chen | b2bf1e2 | 2007-12-03 22:34:16 +1100 | [diff] [blame] | 2749 | if (!is_udplite) /* Disable the option on UDP sockets */ |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2750 | return -ENOPROTOOPT; |
| 2751 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ |
| 2752 | val = 8; |
Alexey Dobriyan | 4be929b | 2010-05-24 14:33:03 -0700 | [diff] [blame] | 2753 | else if (val > USHRT_MAX) |
| 2754 | val = USHRT_MAX; |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2755 | up->pcslen = val; |
| 2756 | up->pcflag |= UDPLITE_SEND_CC; |
| 2757 | break; |
| 2758 | |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2759 | /* The receiver specifies a minimum checksum coverage value. To make |
| 2760 | * sense, this should be set to at least 8 (as done below). If zero is |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2761 | * used, this again means full checksum coverage. */ |
| 2762 | case UDPLITE_RECV_CSCOV: |
Wang Chen | b2bf1e2 | 2007-12-03 22:34:16 +1100 | [diff] [blame] | 2763 | if (!is_udplite) /* Disable the option on UDP sockets */ |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2764 | return -ENOPROTOOPT; |
| 2765 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ |
| 2766 | val = 8; |
Alexey Dobriyan | 4be929b | 2010-05-24 14:33:03 -0700 | [diff] [blame] | 2767 | else if (val > USHRT_MAX) |
| 2768 | val = USHRT_MAX; |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2769 | up->pcrlen = val; |
| 2770 | up->pcflag |= UDPLITE_RECV_CC; |
| 2771 | break; |
| 2772 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2773 | default: |
| 2774 | err = -ENOPROTOOPT; |
| 2775 | break; |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 2776 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2777 | |
| 2778 | return err; |
| 2779 | } |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2780 | EXPORT_SYMBOL(udp_lib_setsockopt); |
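
/*
 * Userspace sketch (illustrative, not kernel code): exercising the
 * UDP_CORK case handled by udp_lib_setsockopt() above. While corked,
 * payloads accumulate in one pending frame; clearing the option
 * triggers udp_push_pending_frames() and emits a single datagram.
 */
#include <linux/udp.h>		/* UDP_CORK */
#include <netinet/in.h>		/* IPPROTO_UDP */
#include <sys/socket.h>

static int send_corked(int fd, const void *a, size_t alen,
		       const void *b, size_t blen)
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	send(fd, a, alen, 0);	/* queued, not yet transmitted */
	send(fd, b, blen, 0);	/* appended to the same pending frame */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}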
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2781 | |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 2782 | int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, |
| 2783 | unsigned int optlen) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2784 | { |
| 2785 | if (level == SOL_UDP || level == SOL_UDPLITE) |
Christoph Hellwig | 91ac1cc | 2020-07-23 08:09:04 +0200 | [diff] [blame] | 2786 | return udp_lib_setsockopt(sk, level, optname, |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 2787 | optval, optlen, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2788 | udp_push_pending_frames); |
| 2789 | return ip_setsockopt(sk, level, optname, optval, optlen); |
| 2790 | } |
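
/*
 * Userspace sketch (illustrative): the UDP_SEGMENT (GSO) option set
 * above via udp_setsockopt(). One large send() is segmented by the
 * stack into gso_size-byte datagrams on transmit (kernels 4.18+).
 */
#include <linux/udp.h>		/* UDP_SEGMENT */
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_gso(int fd, const void *buf, size_t len)
{
	int gso_size = 1400;	/* payload bytes per emitted datagram */

	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0)
		return -1;
	/* len may span many datagrams, up to the 64 KB IP limit */
	return send(fd, buf, len, 0);
}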
| 2791 | |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 2792 | int udp_lib_getsockopt(struct sock *sk, int level, int optname, |
| 2793 | char __user *optval, int __user *optlen) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2794 | { |
| 2795 | struct udp_sock *up = udp_sk(sk); |
| 2796 | int val, len; |
| 2797 | |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2798 | if (get_user(len, optlen)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2799 | return -EFAULT; |
| 2800 | |
| 2801 | len = min_t(unsigned int, len, sizeof(int)); |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2802 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 2803 | if (len < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2804 | return -EINVAL; |
| 2805 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 2806 | switch (optname) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2807 | case UDP_CORK: |
Eric Dumazet | a9f5970 | 2021-09-27 17:29:24 -0700 | [diff] [blame] | 2808 | val = READ_ONCE(up->corkflag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2809 | break; |
| 2810 | |
| 2811 | case UDP_ENCAP: |
| 2812 | val = up->encap_type; |
| 2813 | break; |
| 2814 | |
Tom Herbert | 1c19448 | 2014-05-23 08:47:32 -0700 | [diff] [blame] | 2815 | case UDP_NO_CHECK6_TX: |
| 2816 | val = up->no_check6_tx; |
| 2817 | break; |
| 2818 | |
| 2819 | case UDP_NO_CHECK6_RX: |
| 2820 | val = up->no_check6_rx; |
| 2821 | break; |
| 2822 | |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 2823 | case UDP_SEGMENT: |
Eric Dumazet | 18a419b | 2021-06-30 09:42:44 -0700 | [diff] [blame] | 2824 | val = READ_ONCE(up->gso_size); |
Willem de Bruijn | bec1f6f | 2018-04-26 13:42:17 -0400 | [diff] [blame] | 2825 | break; |
| 2826 | |
Norman Maurer | 9818461 | 2021-04-01 08:59:17 +0200 | [diff] [blame] | 2827 | case UDP_GRO: |
| 2828 | val = up->gro_enabled; |
| 2829 | break; |
| 2830 | |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2831 | /* The following two cannot be changed on UDP sockets; the return is |
| 2832 | * always 0 (which corresponds to the full checksum coverage of UDP). */ |
| 2833 | case UDPLITE_SEND_CSCOV: |
| 2834 | val = up->pcslen; |
| 2835 | break; |
| 2836 | |
| 2837 | case UDPLITE_RECV_CSCOV: |
| 2838 | val = up->pcrlen; |
| 2839 | break; |
| 2840 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2841 | default: |
| 2842 | return -ENOPROTOOPT; |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 2843 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 2845 | if (put_user(len, optlen)) |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2846 | return -EFAULT; |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2847 | if (copy_to_user(optval, &val, len)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2848 | return -EFAULT; |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2849 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2850 | } |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2851 | EXPORT_SYMBOL(udp_lib_getsockopt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2852 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2853 | int udp_getsockopt(struct sock *sk, int level, int optname, |
| 2854 | char __user *optval, int __user *optlen) |
| 2855 | { |
| 2856 | if (level == SOL_UDP || level == SOL_UDPLITE) |
| 2857 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
| 2858 | return ip_getsockopt(sk, level, optname, optval, optlen); |
| 2859 | } |
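
/*
 * Userspace sketch (illustrative): UDP-Lite partial checksum coverage
 * (RFC 3828) as implemented by the UDPLITE_*_CSCOV cases above.
 * Coverage counts from the start of the UDP-Lite header, so 20 here
 * means the 8-byte header plus the first 12 payload bytes.
 */
#include <netinet/in.h>		/* IPPROTO_UDPLITE */
#include <netinet/udplite.h>	/* UDPLITE_SEND_CSCOV, UDPLITE_RECV_CSCOV */
#include <sys/socket.h>

static int open_udplite(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int cscov = 20;

	if (fd < 0)
		return -1;
	/* sender coverage; the kernel clamps values 1..7 up to the minimum of 8 */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));
	/* drop inbound packets whose coverage is below this value */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cscov, sizeof(cscov));
	return fd;
}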
| 2860 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2861 | /** |
| 2862 | * udp_poll - wait for a UDP event. |
Andrew Lunn | 3628e3c | 2020-07-13 01:15:02 +0200 | [diff] [blame] | 2863 | * @file: file struct |
| 2864 | * @sock: socket |
| 2865 | * @wait: poll table |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2866 | * |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2867 | * This is the same as datagram poll, except for the special case of |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2868 | * blocking sockets. If an application is using a blocking fd |
| 2869 | * and a packet with a checksum error is in the queue, |
| 2870 | * select can report data available even though a subsequent |
| 2871 | * read would block. Add special case code |
| 2872 | * to work around these arguably broken applications. |
| 2873 | */ |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 2874 | __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2875 | { |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 2876 | __poll_t mask = datagram_poll(file, sock, wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2877 | struct sock *sk = sock->sk; |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 2878 | |
Eric Dumazet | 3ef7cf5 | 2019-10-23 22:44:50 -0700 | [diff] [blame] | 2879 | if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 2880 | mask |= EPOLLIN | EPOLLRDNORM; |
Paolo Abeni | 2276f58 | 2017-05-16 11:20:14 +0200 | [diff] [blame] | 2881 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2882 | /* Check for false positives due to checksum errors */ |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 2883 | if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && |
Eric Dumazet | e83c674 | 2016-08-23 13:59:33 -0700 | [diff] [blame] | 2884 | !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 2885 | mask &= ~(EPOLLIN | EPOLLRDNORM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2886 | |
Cong Wang | af49338 | 2021-10-08 13:33:05 -0700 | [diff] [blame] | 2887 | /* psock ingress_msg queue should not contain any bad checksum frames */ |
| 2888 | if (sk_is_readable(sk)) |
| 2889 | mask |= EPOLLIN | EPOLLRDNORM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2890 | return mask; |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 2891 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2892 | } |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 2893 | EXPORT_SYMBOL(udp_poll); |
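
/*
 * Userspace sketch (illustrative): the poll()-then-read pattern that
 * udp_poll() above keeps safe. Because first_packet_length() discards
 * datagrams with bad checksums before POLLIN is reported, a blocking
 * recv() after a successful poll() will not stall on a corrupt packet.
 */
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t wait_and_recv(int fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0)	/* block until a valid datagram */
		return -1;
	return recv(fd, buf, len, 0);
}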
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2894 | |
David Ahern | 5d77dca | 2016-08-23 21:06:33 -0700 | [diff] [blame] | 2895 | int udp_abort(struct sock *sk, int err) |
| 2896 | { |
| 2897 | lock_sock(sk); |
| 2898 | |
Paolo Abeni | a8b897c | 2021-06-09 11:49:01 +0200 | [diff] [blame] | 2899 | /* udp{v6}_destroy_sock() sets SOCK_DEAD under the sk lock; |
| 2900 | * avoid racing with close(). |
| 2901 | */ |
| 2902 | if (sock_flag(sk, SOCK_DEAD)) |
| 2903 | goto out; |
| 2904 | |
David Ahern | 5d77dca | 2016-08-23 21:06:33 -0700 | [diff] [blame] | 2905 | sk->sk_err = err; |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 2906 | sk_error_report(sk); |
Eric Dumazet | 286c72d | 2016-10-20 09:39:40 -0700 | [diff] [blame] | 2907 | __udp_disconnect(sk, 0); |
David Ahern | 5d77dca | 2016-08-23 21:06:33 -0700 | [diff] [blame] | 2908 | |
Paolo Abeni | a8b897c | 2021-06-09 11:49:01 +0200 | [diff] [blame] | 2909 | out: |
David Ahern | 5d77dca | 2016-08-23 21:06:33 -0700 | [diff] [blame] | 2910 | release_sock(sk); |
| 2911 | |
| 2912 | return 0; |
| 2913 | } |
| 2914 | EXPORT_SYMBOL_GPL(udp_abort); |
| 2915 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2916 | struct proto udp_prot = { |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 2917 | .name = "UDP", |
| 2918 | .owner = THIS_MODULE, |
| 2919 | .close = udp_lib_close, |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 2920 | .pre_connect = udp_pre_connect, |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 2921 | .connect = ip4_datagram_connect, |
| 2922 | .disconnect = udp_disconnect, |
| 2923 | .ioctl = udp_ioctl, |
| 2924 | .init = udp_init_sock, |
| 2925 | .destroy = udp_destroy_sock, |
| 2926 | .setsockopt = udp_setsockopt, |
| 2927 | .getsockopt = udp_getsockopt, |
| 2928 | .sendmsg = udp_sendmsg, |
| 2929 | .recvmsg = udp_recvmsg, |
| 2930 | .sendpage = udp_sendpage, |
| 2931 | .release_cb = ip4_datagram_release_cb, |
| 2932 | .hash = udp_lib_hash, |
| 2933 | .unhash = udp_lib_unhash, |
| 2934 | .rehash = udp_v4_rehash, |
| 2935 | .get_port = udp_v4_get_port, |
Menglong Dong | 91a760b | 2022-01-06 21:20:20 +0800 | [diff] [blame] | 2936 | .put_port = udp_lib_unhash, |
Cong Wang | 8a59f9d | 2021-03-30 19:32:31 -0700 | [diff] [blame] | 2937 | #ifdef CONFIG_BPF_SYSCALL |
| 2938 | .psock_update_sk_prot = udp_bpf_update_proto, |
| 2939 | #endif |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 2940 | .memory_allocated = &udp_memory_allocated, |
| 2941 | .sysctl_mem = sysctl_udp_mem, |
| 2942 | .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), |
| 2943 | .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), |
| 2944 | .obj_size = sizeof(struct udp_sock), |
| 2945 | .h.udp_table = &udp_table, |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 2946 | .diag_destroy = udp_abort, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 2947 | }; |
Eric Dumazet | c482c56 | 2009-07-17 00:26:32 +0000 | [diff] [blame] | 2948 | EXPORT_SYMBOL(udp_prot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2949 | |
| 2950 | /* ------------------------------------------------------------------------ */ |
| 2951 | #ifdef CONFIG_PROC_FS |
| 2952 | |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2953 | static struct sock *udp_get_first(struct seq_file *seq, int start) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2954 | { |
| 2955 | struct sock *sk; |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2956 | struct udp_seq_afinfo *afinfo; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2957 | struct udp_iter_state *state = seq->private; |
Denis V. Lunev | 6f191ef | 2008-03-28 18:23:33 -0700 | [diff] [blame] | 2958 | struct net *net = seq_file_net(seq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2959 | |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2960 | if (state->bpf_seq_afinfo) |
| 2961 | afinfo = state->bpf_seq_afinfo; |
| 2962 | else |
Muchun Song | 359745d | 2022-01-21 22:14:23 -0800 | [diff] [blame] | 2963 | afinfo = pde_data(file_inode(seq->file)); |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2964 | |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 2965 | for (state->bucket = start; state->bucket <= afinfo->udp_table->mask; |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 2966 | ++state->bucket) { |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 2967 | struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket]; |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 2968 | |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2969 | if (hlist_empty(&hslot->head)) |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 2970 | continue; |
| 2971 | |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2972 | spin_lock_bh(&hslot->lock); |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2973 | sk_for_each(sk, &hslot->head) { |
YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 2974 | if (!net_eq(sock_net(sk), net)) |
Daniel Lezcano | a91275e | 2008-03-21 04:11:58 -0700 | [diff] [blame] | 2975 | continue; |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2976 | if (afinfo->family == AF_UNSPEC || |
| 2977 | sk->sk_family == afinfo->family) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2978 | goto found; |
| 2979 | } |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 2980 | spin_unlock_bh(&hslot->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2981 | } |
| 2982 | sk = NULL; |
| 2983 | found: |
| 2984 | return sk; |
| 2985 | } |
| 2986 | |
| 2987 | static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) |
| 2988 | { |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2989 | struct udp_seq_afinfo *afinfo; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2990 | struct udp_iter_state *state = seq->private; |
Denis V. Lunev | 6f191ef | 2008-03-28 18:23:33 -0700 | [diff] [blame] | 2991 | struct net *net = seq_file_net(seq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2992 | |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2993 | if (state->bpf_seq_afinfo) |
| 2994 | afinfo = state->bpf_seq_afinfo; |
| 2995 | else |
Muchun Song | 359745d | 2022-01-21 22:14:23 -0800 | [diff] [blame] | 2996 | afinfo = pde_data(file_inode(seq->file)); |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 2997 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2998 | do { |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 2999 | sk = sk_next(sk); |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 3000 | } while (sk && (!net_eq(sock_net(sk), net) || |
| 3001 | (afinfo->family != AF_UNSPEC && |
| 3002 | sk->sk_family != afinfo->family))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3003 | |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3004 | if (!sk) { |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3005 | if (state->bucket <= afinfo->udp_table->mask) |
| 3006 | spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock); |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3007 | return udp_get_first(seq, state->bucket + 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3008 | } |
| 3009 | return sk; |
| 3010 | } |
| 3011 | |
| 3012 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) |
| 3013 | { |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3014 | struct sock *sk = udp_get_first(seq, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3015 | |
| 3016 | if (sk) |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 3017 | while (pos && (sk = udp_get_next(seq, sk)) != NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3018 | --pos; |
| 3019 | return pos ? NULL : sk; |
| 3020 | } |
| 3021 | |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3022 | void *udp_seq_start(struct seq_file *seq, loff_t *pos) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3023 | { |
Vitaly Mayatskikh | 30842f2 | 2009-03-23 15:22:33 -0700 | [diff] [blame] | 3024 | struct udp_iter_state *state = seq->private; |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3025 | state->bucket = MAX_UDP_PORTS; |
Vitaly Mayatskikh | 30842f2 | 2009-03-23 15:22:33 -0700 | [diff] [blame] | 3026 | |
YOSHIFUJI Hideaki | b50660f | 2008-03-31 19:38:15 -0700 | [diff] [blame] | 3027 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3028 | } |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3029 | EXPORT_SYMBOL(udp_seq_start); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3030 | |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3031 | void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3032 | { |
| 3033 | struct sock *sk; |
| 3034 | |
YOSHIFUJI Hideaki | b50660f | 2008-03-31 19:38:15 -0700 | [diff] [blame] | 3035 | if (v == SEQ_START_TOKEN) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3036 | sk = udp_get_idx(seq, 0); |
| 3037 | else |
| 3038 | sk = udp_get_next(seq, v); |
| 3039 | |
| 3040 | ++*pos; |
| 3041 | return sk; |
| 3042 | } |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3043 | EXPORT_SYMBOL(udp_seq_next); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3044 | |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3045 | void udp_seq_stop(struct seq_file *seq, void *v) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3046 | { |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 3047 | struct udp_seq_afinfo *afinfo; |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3048 | struct udp_iter_state *state = seq->private; |
| 3049 | |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 3050 | if (state->bpf_seq_afinfo) |
| 3051 | afinfo = state->bpf_seq_afinfo; |
| 3052 | else |
Muchun Song | 359745d | 2022-01-21 22:14:23 -0800 | [diff] [blame] | 3053 | afinfo = pde_data(file_inode(seq->file)); |
Yonghong Song | 9e8ca27 | 2020-06-23 16:08:12 -0700 | [diff] [blame] | 3054 | |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3055 | if (state->bucket <= afinfo->udp_table->mask) |
| 3056 | spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3057 | } |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3058 | EXPORT_SYMBOL(udp_seq_stop); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3059 | |
| 3060 | /* ------------------------------------------------------------------------ */ |
Pavel Emelyanov | 5e659e4 | 2008-04-24 01:02:16 -0700 | [diff] [blame] | 3061 | static void udp4_format_sock(struct sock *sp, struct seq_file *f, |
Tetsuo Handa | 652586d | 2013-11-14 14:31:57 -0800 | [diff] [blame] | 3062 | int bucket) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3063 | { |
| 3064 | struct inet_sock *inet = inet_sk(sp); |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 3065 | __be32 dest = inet->inet_daddr; |
| 3066 | __be32 src = inet->inet_rcv_saddr; |
| 3067 | __u16 destp = ntohs(inet->inet_dport); |
| 3068 | __u16 srcp = ntohs(inet->inet_sport); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3069 | |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3070 | seq_printf(f, "%5d: %08X:%04X %08X:%04X" |
Patrick Talbert | ea9a037 | 2019-05-17 17:11:28 +0200 | [diff] [blame] | 3071 | " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u", |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3072 | bucket, src, srcp, dest, destp, sp->sk_state, |
Eric Dumazet | 31e6d36 | 2009-06-17 19:05:41 -0700 | [diff] [blame] | 3073 | sk_wmem_alloc_get(sp), |
Paolo Abeni | 6c206b2 | 2018-06-08 11:35:40 +0200 | [diff] [blame] | 3074 | udp_rqueue_get(sp), |
Eric W. Biederman | a7cb5a4 | 2012-05-24 01:10:10 -0600 | [diff] [blame] | 3075 | 0, 0L, 0, |
| 3076 | from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), |
| 3077 | 0, sock_i_ino(sp), |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 3078 | refcount_read(&sp->sk_refcnt), sp, |
Tetsuo Handa | 652586d | 2013-11-14 14:31:57 -0800 | [diff] [blame] | 3079 | atomic_read(&sp->sk_drops)); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3080 | } |
| 3081 | |
| 3082 | int udp4_seq_show(struct seq_file *seq, void *v) |
| 3083 | { |
Tetsuo Handa | 652586d | 2013-11-14 14:31:57 -0800 | [diff] [blame] | 3084 | seq_setwidth(seq, 127); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3085 | if (v == SEQ_START_TOKEN) |
yangxingwu | 6c25449 | 2021-12-27 16:29:51 +0800 | [diff] [blame] | 3086 | seq_puts(seq, " sl local_address rem_address st tx_queue " |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3087 | "rx_queue tr tm->when retrnsmt uid timeout " |
Eric Dumazet | cb61cb9 | 2008-06-17 21:04:56 -0700 | [diff] [blame] | 3088 | "inode ref pointer drops"); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3089 | else { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3090 | struct udp_iter_state *state = seq->private; |
| 3091 | |
Tetsuo Handa | 652586d | 2013-11-14 14:31:57 -0800 | [diff] [blame] | 3092 | udp4_format_sock(v, seq, state->bucket); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3093 | } |
Tetsuo Handa | 652586d | 2013-11-14 14:31:57 -0800 | [diff] [blame] | 3094 | seq_pad(seq, '\n'); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3095 | return 0; |
| 3096 | } |
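
/*
 * Illustrative /proc/net/udp entry as produced by udp4_format_sock()
 * (values are made up; addresses and ports are printed in hex, and
 * st 07 is the pseudo-state unconnected UDP sockets report):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
 *  1337: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 21337 2 0000000000000000 0
 */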
| 3097 | |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3098 | #ifdef CONFIG_BPF_SYSCALL |
| 3099 | struct bpf_iter__udp { |
| 3100 | __bpf_md_ptr(struct bpf_iter_meta *, meta); |
| 3101 | __bpf_md_ptr(struct udp_sock *, udp_sk); |
| 3102 | uid_t uid __aligned(8); |
| 3103 | int bucket __aligned(8); |
| 3104 | }; |
| 3105 | |
| 3106 | static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, |
| 3107 | struct udp_sock *udp_sk, uid_t uid, int bucket) |
| 3108 | { |
| 3109 | struct bpf_iter__udp ctx; |
| 3110 | |
| 3111 | meta->seq_num--; /* skip SEQ_START_TOKEN */ |
| 3112 | ctx.meta = meta; |
| 3113 | ctx.udp_sk = udp_sk; |
| 3114 | ctx.uid = uid; |
| 3115 | ctx.bucket = bucket; |
| 3116 | return bpf_iter_run_prog(prog, &ctx); |
| 3117 | } |
| 3118 | |
| 3119 | static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v) |
| 3120 | { |
| 3121 | struct udp_iter_state *state = seq->private; |
| 3122 | struct bpf_iter_meta meta; |
| 3123 | struct bpf_prog *prog; |
| 3124 | struct sock *sk = v; |
| 3125 | uid_t uid; |
| 3126 | |
| 3127 | if (v == SEQ_START_TOKEN) |
| 3128 | return 0; |
| 3129 | |
| 3130 | uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); |
| 3131 | meta.seq = seq; |
| 3132 | prog = bpf_iter_get_info(&meta, false); |
| 3133 | return udp_prog_seq_show(prog, &meta, v, uid, state->bucket); |
| 3134 | } |
| 3135 | |
| 3136 | static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v) |
| 3137 | { |
| 3138 | struct bpf_iter_meta meta; |
| 3139 | struct bpf_prog *prog; |
| 3140 | |
| 3141 | if (!v) { |
| 3142 | meta.seq = seq; |
| 3143 | prog = bpf_iter_get_info(&meta, true); |
| 3144 | if (prog) |
| 3145 | (void)udp_prog_seq_show(prog, &meta, v, 0, 0); |
| 3146 | } |
| 3147 | |
| 3148 | udp_seq_stop(seq, v); |
| 3149 | } |
| 3150 | |
| 3151 | static const struct seq_operations bpf_iter_udp_seq_ops = { |
| 3152 | .start = udp_seq_start, |
| 3153 | .next = udp_seq_next, |
| 3154 | .stop = bpf_iter_udp_seq_stop, |
| 3155 | .show = bpf_iter_udp_seq_show, |
| 3156 | }; |
| 3157 | #endif |
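
/*
 * Illustrative BPF iterator program (a separate object built with
 * libbpf and clang -target bpf; hypothetical, not part of this file).
 * It is invoked once per socket walked by bpf_iter_udp_seq_ops above,
 * plus once with a NULL udp_sk at the end of the walk.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/udp")
int dump_udp(struct bpf_iter__udp *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct udp_sock *udp_sk = ctx->udp_sk;

	if (!udp_sk)	/* NULL once, at the end of the walk */
		return 0;
	BPF_SEQ_PRINTF(seq, "bucket %d uid %u\n", ctx->bucket, ctx->uid);
	return 0;
}

char _license[] SEC("license") = "GPL";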
| 3158 | |
Christoph Hellwig | c350637 | 2018-04-10 19:42:55 +0200 | [diff] [blame] | 3159 | const struct seq_operations udp_seq_ops = { |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3160 | .start = udp_seq_start, |
| 3161 | .next = udp_seq_next, |
| 3162 | .stop = udp_seq_stop, |
| 3163 | .show = udp4_seq_show, |
| 3164 | }; |
Christoph Hellwig | c350637 | 2018-04-10 19:42:55 +0200 | [diff] [blame] | 3165 | EXPORT_SYMBOL(udp_seq_ops); |
Arjan van de Ven | 73cb88e | 2011-10-30 06:46:30 +0000 | [diff] [blame] | 3166 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3167 | static struct udp_seq_afinfo udp4_seq_afinfo = { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3168 | .family = AF_INET, |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3169 | .udp_table = &udp_table, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3170 | }; |
| 3171 | |
Alexey Dobriyan | 2c8c1e7 | 2010-01-17 03:35:32 +0000 | [diff] [blame] | 3172 | static int __net_init udp4_proc_init_net(struct net *net) |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 3173 | { |
Christoph Hellwig | c350637 | 2018-04-10 19:42:55 +0200 | [diff] [blame] | 3174 | if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops, |
| 3175 | sizeof(struct udp_iter_state), &udp4_seq_afinfo)) |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3176 | return -ENOMEM; |
| 3177 | return 0; |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 3178 | } |
| 3179 | |
Alexey Dobriyan | 2c8c1e7 | 2010-01-17 03:35:32 +0000 | [diff] [blame] | 3180 | static void __net_exit udp4_proc_exit_net(struct net *net) |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 3181 | { |
Christoph Hellwig | a3d2599 | 2018-04-10 21:31:50 +0200 | [diff] [blame] | 3182 | remove_proc_entry("udp", net->proc_net); |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 3183 | } |
| 3184 | |
| 3185 | static struct pernet_operations udp4_net_ops = { |
| 3186 | .init = udp4_proc_init_net, |
| 3187 | .exit = udp4_proc_exit_net, |
| 3188 | }; |
| 3189 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3190 | int __init udp4_proc_init(void) |
| 3191 | { |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 3192 | return register_pernet_subsys(&udp4_net_ops); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3193 | } |
| 3194 | |
| 3195 | void udp4_proc_exit(void) |
| 3196 | { |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 3197 | unregister_pernet_subsys(&udp4_net_ops); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 3198 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3199 | #endif /* CONFIG_PROC_FS */ |
| 3200 | |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3201 | static __initdata unsigned long uhash_entries; |
| 3202 | static int __init set_uhash_entries(char *str) |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3203 | { |
Eldad Zack | 413c27d | 2012-05-19 14:13:18 +0000 | [diff] [blame] | 3204 | ssize_t ret; |
| 3205 | |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3206 | if (!str) |
| 3207 | return 0; |
Eldad Zack | 413c27d | 2012-05-19 14:13:18 +0000 | [diff] [blame] | 3208 | |
| 3209 | ret = kstrtoul(str, 0, &uhash_entries); |
| 3210 | if (ret) |
| 3211 | return 0; |
| 3212 | |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3213 | if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) |
| 3214 | uhash_entries = UDP_HTABLE_SIZE_MIN; |
| 3215 | return 1; |
| 3216 | } |
| 3217 | __setup("uhash_entries=", set_uhash_entries); |
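
/*
 * Example (illustrative): the hash size can be pinned from the boot
 * command line instead of scaling with available memory, e.g.:
 *
 *	uhash_entries=65536
 *
 * Values below UDP_HTABLE_SIZE_MIN are rounded up by the parser above.
 */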
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3218 | |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3219 | void __init udp_table_init(struct udp_table *table, const char *name) |
| 3220 | { |
| 3221 | unsigned int i; |
| 3222 | |
Tim Bird | 31fe62b | 2012-05-23 13:33:35 +0000 | [diff] [blame] | 3223 | table->hash = alloc_large_system_hash(name, |
| 3224 | 2 * sizeof(struct udp_hslot), |
| 3225 | uhash_entries, |
| 3226 | 21, /* one slot per 2 MB */ |
| 3227 | 0, |
| 3228 | &table->log, |
| 3229 | &table->mask, |
| 3230 | UDP_HTABLE_SIZE_MIN, |
| 3231 | 64 * 1024); |
| 3232 | |
Eric Dumazet | 512615b | 2009-11-08 10:17:58 +0000 | [diff] [blame] | 3233 | table->hash2 = table->hash + (table->mask + 1); |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3234 | for (i = 0; i <= table->mask; i++) { |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 3235 | INIT_HLIST_HEAD(&table->hash[i].head); |
Eric Dumazet | fdcc8aa9 | 2009-11-08 10:17:05 +0000 | [diff] [blame] | 3236 | table->hash[i].count = 0; |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3237 | spin_lock_init(&table->hash[i].lock); |
| 3238 | } |
Eric Dumazet | 512615b | 2009-11-08 10:17:58 +0000 | [diff] [blame] | 3239 | for (i = 0; i <= table->mask; i++) { |
Eric Dumazet | ca065d0 | 2016-04-01 08:52:13 -0700 | [diff] [blame] | 3240 | INIT_HLIST_HEAD(&table->hash2[i].head); |
Eric Dumazet | 512615b | 2009-11-08 10:17:58 +0000 | [diff] [blame] | 3241 | table->hash2[i].count = 0; |
| 3242 | spin_lock_init(&table->hash2[i].lock); |
| 3243 | } |
Eric Dumazet | 645ca70 | 2008-10-29 01:41:45 -0700 | [diff] [blame] | 3244 | } |
| 3245 | |
Tom Herbert | 723b8e4 | 2015-02-24 09:17:31 -0800 | [diff] [blame] | 3246 | u32 udp_flow_hashrnd(void) |
| 3247 | { |
| 3248 | static u32 hashrnd __read_mostly; |
| 3249 | |
| 3250 | net_get_random_once(&hashrnd, sizeof(hashrnd)); |
| 3251 | |
| 3252 | return hashrnd; |
| 3253 | } |
| 3254 | EXPORT_SYMBOL(udp_flow_hashrnd); |
| 3255 | |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 3256 | static void __udp_sysctl_init(struct net *net) |
| 3257 | { |
| 3258 | net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM; |
| 3259 | net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM; |
| 3260 | |
| 3261 | #ifdef CONFIG_NET_L3_MASTER_DEV |
| 3262 | net->ipv4.sysctl_udp_l3mdev_accept = 0; |
| 3263 | #endif |
| 3264 | } |
| 3265 | |
| 3266 | static int __net_init udp_sysctl_init(struct net *net) |
| 3267 | { |
| 3268 | __udp_sysctl_init(net); |
| 3269 | return 0; |
| 3270 | } |
| 3271 | |
| 3272 | static struct pernet_operations __net_initdata udp_sysctl_ops = { |
Kirill Tkhai | fc18999 | 2018-03-22 21:34:46 +0300 | [diff] [blame] | 3273 | .init = udp_sysctl_init, |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 3274 | }; |
| 3275 | |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3276 | #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) |
| 3277 | DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, |
| 3278 | struct udp_sock *udp_sk, uid_t uid, int bucket) |
| 3279 | |
Yonghong Song | f9c7927 | 2020-07-23 11:41:10 -0700 | [diff] [blame] | 3280 | static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3281 | { |
| 3282 | struct udp_iter_state *st = priv_data; |
| 3283 | struct udp_seq_afinfo *afinfo; |
| 3284 | int ret; |
| 3285 | |
| 3286 | afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN); |
| 3287 | if (!afinfo) |
| 3288 | return -ENOMEM; |
| 3289 | |
| 3290 | afinfo->family = AF_UNSPEC; |
| 3291 | afinfo->udp_table = &udp_table; |
| 3292 | st->bpf_seq_afinfo = afinfo; |
Yonghong Song | f9c7927 | 2020-07-23 11:41:10 -0700 | [diff] [blame] | 3293 | ret = bpf_iter_init_seq_net(priv_data, aux); |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3294 | if (ret) |
| 3295 | kfree(afinfo); |
| 3296 | return ret; |
| 3297 | } |
| 3298 | |
| 3299 | static void bpf_iter_fini_udp(void *priv_data) |
| 3300 | { |
| 3301 | struct udp_iter_state *st = priv_data; |
| 3302 | |
| 3303 | kfree(st->bpf_seq_afinfo); |
| 3304 | bpf_iter_fini_seq_net(priv_data); |
| 3305 | } |
| 3306 | |
Yonghong Song | 14fc6bd6 | 2020-07-23 11:41:09 -0700 | [diff] [blame] | 3307 | static const struct bpf_iter_seq_info udp_seq_info = { |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3308 | .seq_ops = &bpf_iter_udp_seq_ops, |
| 3309 | .init_seq_private = bpf_iter_init_udp, |
| 3310 | .fini_seq_private = bpf_iter_fini_udp, |
| 3311 | .seq_priv_size = sizeof(struct udp_iter_state), |
Yonghong Song | 14fc6bd6 | 2020-07-23 11:41:09 -0700 | [diff] [blame] | 3312 | }; |
| 3313 | |
| 3314 | static struct bpf_iter_reg udp_reg_info = { |
| 3315 | .target = "udp", |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3316 | .ctx_arg_info_size = 1, |
| 3317 | .ctx_arg_info = { |
| 3318 | { offsetof(struct bpf_iter__udp, udp_sk), |
| 3319 | PTR_TO_BTF_ID_OR_NULL }, |
| 3320 | }, |
Yonghong Song | 14fc6bd6 | 2020-07-23 11:41:09 -0700 | [diff] [blame] | 3321 | .seq_info = &udp_seq_info, |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3322 | }; |
| 3323 | |
| 3324 | static void __init bpf_iter_register(void) |
| 3325 | { |
Yonghong Song | 951cf36 | 2020-07-20 09:34:03 -0700 | [diff] [blame] | 3326 | udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP]; |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3327 | if (bpf_iter_reg_target(&udp_reg_info)) |
| 3328 | pr_warn("Warning: could not register bpf iterator udp\n"); |
| 3329 | } |
| 3330 | #endif |
| 3331 | |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 3332 | void __init udp_init(void) |
| 3333 | { |
Eric Dumazet | f03d78d | 2011-07-07 00:27:05 -0700 | [diff] [blame] | 3334 | unsigned long limit; |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 3335 | unsigned int i; |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 3336 | |
Eric Dumazet | f86dcc5 | 2009-10-07 00:37:59 +0000 | [diff] [blame] | 3337 | udp_table_init(&udp_table, "UDP"); |
Eric Dumazet | f03d78d | 2011-07-07 00:27:05 -0700 | [diff] [blame] | 3338 | limit = nr_free_buffer_pages() / 8; |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 3339 | limit = max(limit, 128UL); |
| 3340 | sysctl_udp_mem[0] = limit / 4 * 3; |
| 3341 | sysctl_udp_mem[1] = limit; |
| 3342 | sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; |
| 3343 | |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 3344 | __udp_sysctl_init(&init_net); |
Eric Dumazet | 4b27275 | 2016-12-08 11:41:54 -0800 | [diff] [blame] | 3345 | |
| 3346 | /* 16 spinlocks per cpu */ |
| 3347 | udp_busylocks_log = ilog2(nr_cpu_ids) + 4; |
| 3348 | udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log, |
| 3349 | GFP_KERNEL); |
| 3350 | if (!udp_busylocks) |
| 3351 | panic("UDP: failed to alloc udp_busylocks\n"); |
| 3352 | for (i = 0; i < (1U << udp_busylocks_log); i++) |
| 3353 | spin_lock_init(udp_busylocks + i); |
Tonghao Zhang | 1e80295 | 2018-03-13 21:57:16 -0700 | [diff] [blame] | 3354 | |
| 3355 | if (register_pernet_subsys(&udp_sysctl_ops)) |
| 3356 | panic("UDP: failed to init sysctl parameters.\n"); |
Yonghong Song | 5788b3a | 2020-06-23 16:08:13 -0700 | [diff] [blame] | 3357 | |
| 3358 | #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) |
| 3359 | bpf_iter_register(); |
| 3360 | #endif |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 3361 | } |