// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/ :	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *	Richard Underwood	:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *	Pauline Middelink	:	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *	Arnt Gulbrandsen	:	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *	Gerhard Koerting	:	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *	Werner Almesberger	:	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *	Willy Konynenberg	:	Transparent proxying support
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}

INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
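/* Deliver an skb to the raw-socket layer and then to the registered
 * inet protocol handler (TCP/UDP dispatched via INDIRECT_CALL_2).
 * A negative handler return value encodes a new protocol number and
 * causes the packet to be resubmitted under that protocol.
 */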
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb(skb);
				return;
			}
			nf_reset_ct(skb);
		}
		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
				      skb);
		if (ret < 0) {
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb(skb);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}

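/* Final step of local delivery, invoked via the NF_INET_LOCAL_IN hook:
 * strip the IP header and hand the payload to the transport protocol
 * under rcu_read_lock().
 */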
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}

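/* Parse and validate IP options. Returns true if the packet must be
 * dropped, e.g. when option compilation fails or a source route option
 * is present but source routing is disabled on the ingress device.
 */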
static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_options *opt;
	const struct iphdr *iph;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb, dev))
			goto drop;
	}

	return false;
drop:
	return true;
}

INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
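/* Per-packet receive work shared by ip_rcv_finish() and the list-receive
 * path: optional early demux, routing decision (unless a valid dst is
 * already attached), route-classid accounting, IP option handling, and
 * the RFC 1122 check on frames received via link-layer multicast or
 * broadcast.
 */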
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct rtable *rt;
	int err;

	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
					      udp_v4_early_demux, skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

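/* Finish a single packet after the NF_INET_PRE_ROUTING hook: let an L3
 * master device claim it, run the core receive work, then pass it to
 * dst_input() (local delivery or forwarding).
 */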
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return skb;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

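/* Deliver a sublist of already-routed packets: each skb is unlinked from
 * the list and pushed through dst_input() individually.
 */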
static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

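/* Finish a list of packets after NF_INET_PRE_ROUTING: run the core receive
 * work on each skb and batch consecutive packets that resolved to the same
 * dst into sublists before delivering them.
 */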
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct dst_entry *curr_dst = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}

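/* Run the NF_INET_PRE_ROUTING hook over a sublist of packets that arrived
 * on the same device and namespace, then finish whatever the hook let
 * through.
 */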
static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip_sublist_rcv(&sublist, curr_dev, curr_net);
}