/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 *		Dmitry Gorodchanin:	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/ :	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *		Richard Underwood:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *		Pauline Middelink:	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *		Arnt Gulbrandsen:	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *		Gerhard Koerting:	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *	Werner Almesberger	:	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *	Willy Konynenberg	:	Transparent proxying support
 *
 *
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

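/*
 * Receive-path overview (a sketch of the call chain implemented in this
 * file; the exact path depends on the netfilter configuration):
 *
 *	__netif_receive_skb()
 *	    -> ip_rcv() / ip_list_rcv()		(packet_type handlers)
 *	        -> NF_INET_PRE_ROUTING hook
 *	            -> ip_rcv_finish() -> dst_input()
 *	                -> ip_local_deliver()	(or ip_forward() etc.)
 *	                    -> NF_INET_LOCAL_IN hook
 *	                        -> ip_protocol_deliver_rcu()
 */
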
/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}

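/*
 * For context: the ra_chain walked above is populated by the
 * IP_ROUTER_ALERT socket option on raw sockets (ip_ra_control()).
 * A minimal user-space consumer might look like this (illustrative
 * sketch, not kernel code; error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 *	// recvfrom(fd, ...) now sees datagrams carrying the Router
 *	// Alert option whose protocol matches the raw socket's.
 */
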
INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb(skb);
				return;
			}
			nf_reset(skb);
		}
		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
				      skb);
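		/* A negative return value asks for resubmission: -ret
		 * names the protocol to deliver to next (e.g. after a
		 * handler has stripped an encapsulation header). */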
		if (ret < 0) {
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb(skb);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}

static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}

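/*
 * Option handling: ihl counts 32-bit words, so a bare IPv4 header has
 * ihl = 5 (20 bytes) and the option area below is ihl * 4 - 20 bytes,
 * at most 40 bytes for the maximum ihl of 15. This function is only
 * reached when ihl > 5 (see ip_rcv_finish_core()).
 */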
static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_options *opt;
	const struct iphdr *iph;

	/* It looks like overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that the combination of IP options
	   and a running sniffer is an extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb, dev))
			goto drop;
	}

	return false;
drop:
	return true;
}

static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct rtable *rt;
	int err;

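	/* Early demux (controlled by the ip_early_demux sysctl) lets
	 * TCP/UDP look up the owning socket before the routing decision,
	 * so a dst cached on that socket can be reused and the FIB lookup
	 * skipped. Fragments are excluded because only the first fragment
	 * carries the transport header needed for the lookup. */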
	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = edemux(skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return skb;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

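/*
 * For context: these entry points are not called directly; they are
 * registered as the ETH_P_IP packet_type handlers, conventionally from
 * af_inet.c (sketch of the assumed registration; check your tree):
 *
 *	static struct packet_type ip_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = ip_rcv,
 *		.list_func = ip_list_rcv,
 *	};
 *
 *	dev_add_pack(&ip_packet_type);
 */
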
static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

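/*
 * The list handlers below keep packets batched for as long as possible:
 * consecutive skbs that share the same dst (here) or the same dev/net
 * (in ip_list_rcv()) are spliced into a sublist and handed down together,
 * which improves instruction-cache locality across the stack. A packet
 * that breaks the run simply closes the current sublist and starts a
 * new one.
 */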
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct dst_entry *curr_dst = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}

static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv(&sublist, curr_dev, curr_net);
}