// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller	:	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy	:	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn;		/* RFC3168 support */
	u16		max_df_size;	/* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}
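
/* ip4_frag_ecn() one-hot encodes the two ECN bits so the codepoints seen
 * across all fragments of a datagram can simply be OR-ed into qp->ecn:
 *
 *	tos & INET_ECN_MASK	codepoint	ip4_frag_ecn(tos)
 *	0			Not-ECT		0x01
 *	1			ECT(1)		0x02
 *	2			ECT(0)		0x04
 *	3			CE		0x08
 *
 * At reassembly time the shared ip_frag_ecn_table[] maps the accumulated
 * bitmask back to a final ECN value, or to 0xff for an invalid mix (e.g.
 * Not-ECT combined with CE), which ip_frag_reasm() rejects with -EINVAL.
 */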

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev);

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct net *net = q->fqdir->net;

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->fqdir->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}

/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}
120
Andy Zhou5cf42282015-05-15 14:15:35 -0700121static bool frag_expire_skip_icmp(u32 user)
122{
123 return user == IP_DEFRAG_AF_PACKET ||
124 ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
Andy Zhou8bc04862015-05-15 14:15:36 -0700125 __IP_DEFRAG_CONNTRACK_IN_END) ||
126 ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
127 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
Andy Zhou5cf42282015-05-15 14:15:35 -0700128}
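
/* These defrag "users" reassemble on behalf of traffic that is merely being
 * captured or forwarded (packet sockets, conntrack, bridge netfilter). Per
 * RFC 792 only the destination host should emit a "Fragment Reassembly
 * Timeout" ICMP, so ip_expire() below skips the ICMP for them unless the
 * route turns out to be RTN_LOCAL anyway.
 */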

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = qp->q.fqdir->net;

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&qp->q);
	if (!head)
		goto out;
	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb(head);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}
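
/* The lookup key extends RFC 791's reassembly tuple -- (source, destination,
 * protocol, identification) -- with the kernel-internal defrag "user" and
 * the L3 master device index, so that e.g. conntrack and a packet socket
 * each get their own queue for the same on-wire datagram instead of racing
 * over one.
 */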

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.fqdir->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc)
		__IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);

	return rc;
}
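
/* Worked example of the heuristic above: every fragment from a peer
 * increments peer->rid, and qp->rid remembers the value this queue last
 * saw. With max_dist == 64 (the default set in ipv4_frags_init_net()),
 * if 100 fragments from the same peer arrived between two fragments of
 * this queue, (end - start) == 100 > 64: the peer's 16-bit IP ID space
 * has likely wrapped, so the queue is judged stale and dropped rather
 * than risk gluing unrelated fragments together.
 */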

static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.fqdir, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = qp->q.fqdir->net;
	int ihl, end, flags, offset;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

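	/* Worked example: frag_off == htons(0x2005) carries IP_MF (0x2000)
	 * plus an offset field of 5, i.e. this fragment's payload starts at
	 * byte 5 * 8 = 40 of the original datagram and more fragments
	 * follow. Only the final fragment (IP_MF clear) tells us the
	 * datagram's total length.
	 */
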
	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note: skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Make sure the compiler won't do silly aliasing games */
	barrier();

	prev_tail = qp->q.fragments_tail;
	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		qp->iif = dev->ifindex;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.fqdir, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q);
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);
		return -EINVAL;
	}
	err = -EINVAL;
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}
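
/* Return values of ip_frag_queue(), for reference:
 *	-EINPROGRESS	fragment queued; skb consumed, datagram incomplete
 *	0		this fragment completed the datagram and skb now
 *			holds the reassembled packet (see ip_frag_reasm())
 *	other < 0	fragment invalid, duplicate or too far; skb freed
 */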

/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = qp->q.fqdir->net;
	struct iphdr *iph;
	void *reasm_data;
	int len, err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}

	/* Make the one we just received the head. */
	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
	if (!reasm_data)
		goto out_nomem;

	len = ip_hdrlen(skb) + qp->q.len;
	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	inet_frag_reasm_finish(&qp->q, skb, reasm_data);

	skb->dev = dev;
	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(skb);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * the original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such a DF fragment was the
	 * largest frag seen, to avoid sending tiny DF-fragments in case
	 * skb was built from one very small df-fragment and one large
	 * non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
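
/* Illustrative caller pattern (a sketch, not code from this file): a
 * netfilter-style user hands every fragment to ip_defrag() and treats any
 * nonzero return as "the skb was queued or freed", roughly:
 *
 *	if (ip_is_fragment(ip_hdr(skb))) {
 *		if (ip_defrag(net, skb, IP_DEFRAG_CONNTRACK_IN))
 *			return NF_STOLEN;	(queued or dropped)
 *	}
 *	... skb now holds a complete datagram ...
 */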

struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
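
/* Unlike ip_defrag(), ip_check_defrag() tolerates shared skbs (it clones
 * via skb_share_check()) and never consumes a non-fragment: callers get
 * back the untouched skb, NULL if the fragment was queued or dropped, or
 * the fully reassembled datagram. Packet sockets rely on this for
 * PACKET_FANOUT_FLAG_DEFRAG, so that fanout hashes whole datagrams.
 */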

#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ipfrag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ipfrag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};
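
/* These knobs surface under /proc/sys/net/ipv4/ in every netns; e.g. to
 * raise the reassembly memory ceiling on a busy middlebox (illustrative
 * values, not defaults):
 *
 *	sysctl -w net.ipv4.ipfrag_high_thresh=8388608
 *	sysctl -w net.ipv4.ipfrag_low_thresh=6291456
 *
 * The .data/.extra pointers are wired to the per-netns fqdir fields by
 * ip4_frags_ns_ctl_register() below.
 */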

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;
	}
	table[0].data	= &net->ipv4.fqdir.high_thresh;
	table[0].extra1	= &net->ipv4.fqdir.low_thresh;
	table[1].data	= &net->ipv4.fqdir.low_thresh;
	table[1].extra2	= &net->ipv4.fqdir.high_thresh;
	table[2].data	= &net->ipv4.fqdir.timeout;
	table[3].data	= &net->ipv4.fqdir.max_dist;

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +0000641static int __net_init ipv4_frags_init_net(struct net *net)
Pavel Emelyanov8d8354d2008-01-22 05:58:31 -0800642{
Eric Dumazet787bea72018-03-31 12:58:43 -0700643 int res;
644
Jesper Dangaard Brouerc2a93662013-01-15 07:16:35 +0000645 /* Fragment cache limits.
646 *
647 * The fragment memory accounting code, (tries to) account for
648 * the real memory usage, by measuring both the size of frag
649 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
650 * and the SKB's truesize.
651 *
652 * A 64K fragment consumes 129736 bytes (44*2944)+200
653 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
654 *
655 * We will commit 4MB at one time. Should we cross that limit
656 * we will prune down to 3MB, making room for approx 8 big 64K
657 * fragments 8x128k.
Pavel Emelyanove31e0bdc72008-01-22 06:10:13 -0800658 */
Eric Dumazet803fdd92019-05-24 09:03:32 -0700659 net->ipv4.fqdir.high_thresh = 4 * 1024 * 1024;
660 net->ipv4.fqdir.low_thresh = 3 * 1024 * 1024;
Pavel Emelyanove31e0bdc72008-01-22 06:10:13 -0800661 /*
Pavel Emelyanovb2fd5322008-01-22 06:09:37 -0800662 * Important NOTE! Fragment queue must be destroyed before MSL expires.
663 * RFC791 is wrong proposing to prolongate timer each fragment arrival
664 * by TTL.
665 */
Eric Dumazet803fdd92019-05-24 09:03:32 -0700666 net->ipv4.fqdir.timeout = IP_FRAG_TIME;
Pavel Emelyanovb2fd5322008-01-22 06:09:37 -0800667
Eric Dumazet803fdd92019-05-24 09:03:32 -0700668 net->ipv4.fqdir.max_dist = 64;
Nikolay Borisov0fbf4cb2016-02-15 12:11:31 +0200669
Eric Dumazeta39aca62019-05-24 09:03:38 -0700670 res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net);
Eric Dumazet787bea72018-03-31 12:58:43 -0700671 if (res < 0)
672 return res;
673 res = ip4_frags_ns_ctl_register(net);
674 if (res < 0)
Eric Dumazet803fdd92019-05-24 09:03:32 -0700675 fqdir_exit(&net->ipv4.fqdir);
Eric Dumazet787bea72018-03-31 12:58:43 -0700676 return res;
Pavel Emelyanov8d8354d2008-01-22 05:58:31 -0800677}
678
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +0000679static void __net_exit ipv4_frags_exit_net(struct net *net)
Pavel Emelyanov81566e82008-01-22 06:12:39 -0800680{
Pavel Emelyanov0a64b4b2008-05-19 13:51:29 -0700681 ip4_frags_ns_ctl_unregister(net);
Eric Dumazet803fdd92019-05-24 09:03:32 -0700682 fqdir_exit(&net->ipv4.fqdir);
Pavel Emelyanov81566e82008-01-22 06:12:39 -0800683}
684
685static struct pernet_operations ip4_frags_ops = {
686 .init = ipv4_frags_init_net,
687 .exit = ipv4_frags_exit_net,
688};

static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};
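
/* With these parameters inet_frag_find() resolves a frag_v4_compare_key
 * straight to its inet_frag_queue through a resizable hash table: jhash2()
 * over the packed key words for hashing, memcmp() for exact match, and
 * automatic shrinking as queues complete or expire. ip4_key_hashfn() and
 * ip4_obj_hashfn() must hash identically, since lookups hash the bare key
 * while inserts hash the key embedded in the queue object.
 */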

void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}