// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	inet fragments management
 *
 *	Authors:	Pavel Emelyanov <xemul@openvz.org>
 *			Started as consolidation of ipv4/ip_fragment.c,
 *			ipv6/reassembly, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
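/* Illustrative example (not part of the original source): fragments
 * covering [0, 1200), [1200, 2400) and [2400, 3000), arriving in order,
 * form a single "run": one rb-tree node whose head skb has
 * frag_run_len == 3000 and whose last skb has next_frag == NULL.
 * A later fragment starting at offset 4000 cannot be appended to that
 * run and therefore starts a new run, i.e. a new rb-tree node.
 */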
struct ipfrag_skb_cb {
        union {
                struct inet_skb_parm    h4;
                struct inet6_skb_parm   h6;
        };
        struct sk_buff          *next_frag;
        int                     frag_run_len;
};

#define FRAG_CB(skb)            ((struct ipfrag_skb_cb *)((skb)->cb))

static void fragcb_clear(struct sk_buff *skb)
{
        RB_CLEAR_NODE(&skb->rbnode);
        FRAG_CB(skb)->next_frag = NULL;
        FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
                                   struct sk_buff *skb)
{
        fragcb_clear(skb);

        FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
        FRAG_CB(q->fragments_tail)->next_frag = skb;
        q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
        fragcb_clear(skb);

        if (q->last_run_head)
                rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
                             &q->last_run_head->rbnode.rb_right);
        else
                rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
        rb_insert_color(&skb->rbnode, &q->rb_fragments);

        q->fragments_tail = skb;
        q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
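/* Illustrative use (by the IPv4/IPv6 reassembly code, outside this file):
 * each fragment contributes one IPFRAG_ECN_* bit to a per-queue
 * accumulator; at reassembly time the OR of those bits indexes this
 * table, and the result is either 0xff (drop the whole datagram) or the
 * ECN codepoint to merge into the reassembled header.
 */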
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

int inet_frags_init(struct inet_frags *f)
{
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        refcount_set(&f->refcnt, 1);
        init_completion(&f->completion);
        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

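/* Lifetime note: each fqdir_init() takes a reference on @f, and
 * fqdir_work_fn() drops it once the corresponding hash table has been
 * freed. inet_frags_fini() drops the initial reference taken in
 * inet_frags_init() and then waits on f->completion, so the kmem_cache
 * is destroyed only after all outstanding fqdir teardown work is done.
 */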
void inet_frags_fini(struct inet_frags *f)
{
        if (refcount_dec_and_test(&f->refcnt))
                complete(&f->completion);

        wait_for_completion(&f->completion);

        kmem_cache_destroy(f->frags_cachep);
        f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
        struct inet_frag_queue *fq = ptr;
        int count;

        /* count accumulates the references to drop: the timer's reference
         * (if the timer was still pending) plus the hash table's reference,
         * released here whether the queue was still live or was already
         * marked INET_FRAG_HASH_DEAD by inet_frag_kill().
         */
        count = del_timer_sync(&fq->timer) ? 1 : 0;

        spin_lock_bh(&fq->lock);
        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq->flags |= INET_FRAG_COMPLETE;
                count++;
        } else if (fq->flags & INET_FRAG_HASH_DEAD) {
                count++;
        }
        spin_unlock_bh(&fq->lock);

        if (refcount_sub_and_test(count, &fq->refcnt))
                inet_frag_destroy(fq);
}

static void fqdir_work_fn(struct work_struct *work)
{
        struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
        struct inet_frags *f = fqdir->f;

        rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

        /* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
         * have completed, since they need to dereference fqdir.
         * Would it not be nice to have kfree_rcu_barrier() ? :)
         */
        rcu_barrier();

        if (refcount_dec_and_test(&f->refcnt))
                complete(&f->completion);

        kfree(fqdir);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
        struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
        int res;

        if (!fqdir)
                return -ENOMEM;
        fqdir->f = f;
        fqdir->net = net;
        res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
        if (res < 0) {
                kfree(fqdir);
                return res;
        }
        refcount_inc(&f->refcnt);
        *fqdirp = fqdir;
        return 0;
}
EXPORT_SYMBOL(fqdir_init);

void fqdir_exit(struct fqdir *fqdir)
{
        INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
        queue_work(system_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{
        if (del_timer(&fq->timer))
                refcount_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                struct fqdir *fqdir = fq->fqdir;

                fq->flags |= INET_FRAG_COMPLETE;
                rcu_read_lock();
                /* The RCU read lock provides a memory barrier
                 * guaranteeing that if fqdir->dead is false then
                 * the hash table destruction will not start until
                 * after we unlock. Paired with fqdir_pre_exit().
                 */
                if (!READ_ONCE(fqdir->dead)) {
                        rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
                                               fqdir->f->rhash_params);
                        refcount_dec(&fq->refcnt);
                } else {
                        fq->flags |= INET_FRAG_HASH_DEAD;
                }
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
        struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
                                                 rcu);
        struct inet_frags *f = q->fqdir->f;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
        struct rb_node *p = rb_first(root);
        unsigned int sum = 0;

        while (p) {
                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

                p = rb_next(p);
                rb_erase(&skb->rbnode, root);
                while (skb) {
                        struct sk_buff *next = FRAG_CB(skb)->next_frag;

                        sum += skb->truesize;
                        kfree_skb(skb);
                        skb = next;
                }
        }
        return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
        struct fqdir *fqdir;
        unsigned int sum, sum_truesize = 0;
        struct inet_frags *f;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fqdir = q->fqdir;
        f = fqdir->f;
        sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
        sum = sum_truesize + f->qsize;

        call_rcu(&q->rcu, inet_frag_destroy_rcu);

        sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->fqdir = fqdir;
        f->constructor(q, arg);
        add_frag_mem_limit(fqdir, f->qsize);

        timer_setup(&q->timer, f->frag_expire, 0);
        spin_lock_init(&q->lock);
        /* Three references: one for the hash table, one for the timer,
         * and one returned to the caller via inet_frag_find().
         */
        refcount_set(&q->refcnt, 3);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
                                                void *arg,
                                                struct inet_frag_queue **prev)
{
        struct inet_frags *f = fqdir->f;
        struct inet_frag_queue *q;

        q = inet_frag_alloc(fqdir, f, arg);
        if (!q) {
                *prev = ERR_PTR(-ENOMEM);
                return NULL;
        }
        mod_timer(&q->timer, jiffies + fqdir->timeout);

        *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
                                                 &q->node, f->rhash_params);
        if (*prev) {
                /* *prev is either the queue that won a concurrent insert
                 * race or an ERR_PTR from the hash table; either way,
                 * discard the queue we just allocated.
                 */
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
                return NULL;
        }
        return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
        /* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
        long high_thresh = READ_ONCE(fqdir->high_thresh);
        struct inet_frag_queue *fq = NULL, *prev;

        if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
                return NULL;

        rcu_read_lock();

        prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
        if (!prev)
                fq = inet_frag_create(fqdir, key, &prev);
        if (!IS_ERR_OR_NULL(prev)) {
                fq = prev;
                if (!refcount_inc_not_zero(&fq->refcnt))
                        fq = NULL;
        }
        rcu_read_unlock();
        return fq;
}
EXPORT_SYMBOL(inet_frag_find);

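/* Insert @skb, covering bytes [@offset, @end) of the original datagram,
 * into the run/rb-tree structure of @q. Returns IPFRAG_OK on success,
 * IPFRAG_DUP if the fragment is wholly contained in data already queued
 * (the caller may drop just this skb), or IPFRAG_OVERLAP if it partially
 * overlaps queued data (per RFC 5722 the caller drops the whole queue).
 */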
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
                           int offset, int end)
{
        struct sk_buff *last = q->fragments_tail;

        /* RFC5722, Section 4, amended by Errata ID : 3089
         *                          When reassembling an IPv6 datagram, if
         *   one or more its constituent fragments is determined to be an
         *   overlapping fragment, the entire datagram (and any constituent
         *   fragments) MUST be silently discarded.
         *
         * Duplicates, however, should be ignored (i.e. skb dropped, but the
         * queue/fragments kept for later reassembly).
         */
        if (!last)
                fragrun_create(q, skb); /* First fragment. */
        else if (last->ip_defrag_offset + last->len < end) {
                /* This is the common case: skb goes to the end. */
                /* Detect and discard overlaps. */
                if (offset < last->ip_defrag_offset + last->len)
                        return IPFRAG_OVERLAP;
                if (offset == last->ip_defrag_offset + last->len)
                        fragrun_append_to_last(q, skb);
                else
                        fragrun_create(q, skb);
        } else {
                /* Binary search. Note that skb can become the first fragment,
                 * but not the last (covered above).
                 */
                struct rb_node **rbn, *parent;

                rbn = &q->rb_fragments.rb_node;
                do {
                        struct sk_buff *curr;
                        int curr_run_end;

                        parent = *rbn;
                        curr = rb_to_skb(parent);
                        curr_run_end = curr->ip_defrag_offset +
                                        FRAG_CB(curr)->frag_run_len;
                        if (end <= curr->ip_defrag_offset)
                                rbn = &parent->rb_left;
                        else if (offset >= curr_run_end)
                                rbn = &parent->rb_right;
                        else if (offset >= curr->ip_defrag_offset &&
                                 end <= curr_run_end)
                                return IPFRAG_DUP;
                        else
                                return IPFRAG_OVERLAP;
                } while (*rbn);
                /* Here we have parent properly set, and rbn pointing to
                 * one of its NULL left/right children. Insert skb.
                 */
                fragcb_clear(skb);
                rb_link_node(&skb->rbnode, parent, rbn);
                rb_insert_color(&skb->rbnode, &q->rb_fragments);
        }

        skb->ip_defrag_offset = offset;

        return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

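/* Prepare @q for reassembly around @skb, the fragment that completed the
 * datagram. If @skb is not the first fragment in the tree, a clone takes
 * over @skb's place in the run/rb-tree and @skb is morphed onto the head
 * (skb_morph()), so the skb the caller already holds becomes the head of
 * the reassembled packet. Returns the location where subsequent fragments
 * should be chained (the tail of head's frag_list), or NULL on allocation
 * failure.
 */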
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
                              struct sk_buff *parent)
{
        struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
        struct sk_buff **nextp;
        int delta;

        if (head != skb) {
                fp = skb_clone(skb, GFP_ATOMIC);
                if (!fp)
                        return NULL;
                FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
                if (RB_EMPTY_NODE(&skb->rbnode))
                        FRAG_CB(parent)->next_frag = fp;
                else
                        rb_replace_node(&skb->rbnode, &fp->rbnode,
                                        &q->rb_fragments);
                if (q->fragments_tail == skb)
                        q->fragments_tail = fp;
                skb_morph(skb, head);
                FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
                consume_skb(head);
                head = skb;
        }
        WARN_ON(head->ip_defrag_offset != 0);

        delta = -head->truesize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                return NULL;

        delta += head->truesize;
        if (delta)
                add_frag_mem_limit(q->fqdir, delta);

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        return NULL;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->data_len = head->data_len - plen;
                clone->len = clone->data_len;
                head->truesize += clone->truesize;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(q->fqdir, clone->truesize);
                skb_shinfo(head)->frag_list = clone;
                nextp = &clone->next;
        } else {
                nextp = &skb_shinfo(head)->frag_list;
        }

        return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

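/* Walk the run/rb-tree in order and link every remaining fragment onto
 * @head's frag_list via @reasm_data (the pointer returned by
 * inet_frag_reasm_prepare()), coalescing into @head when @try_coalesce
 * allows it, while updating lengths, checksum and per-fqdir memory
 * accounting.
 */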
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
                            void *reasm_data, bool try_coalesce)
{
        struct sk_buff **nextp = (struct sk_buff **)reasm_data;
        struct rb_node *rbn;
        struct sk_buff *fp;
        int sum_truesize;

        skb_push(head, head->data - skb_network_header(head));

        /* Traverse the tree in order, to build frag_list. */
        fp = FRAG_CB(head)->next_frag;
        rbn = rb_next(&head->rbnode);
        rb_erase(&head->rbnode, &q->rb_fragments);

        sum_truesize = head->truesize;
        while (rbn || fp) {
                /* fp points to the next sk_buff in the current run;
                 * rbn points to the next run.
                 */
                /* Go through the current run. */
                while (fp) {
                        struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
                        bool stolen;
                        int delta;

                        sum_truesize += fp->truesize;
                        if (head->ip_summed != fp->ip_summed)
                                head->ip_summed = CHECKSUM_NONE;
                        else if (head->ip_summed == CHECKSUM_COMPLETE)
                                head->csum = csum_add(head->csum, fp->csum);

                        if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
                                                             &delta)) {
                                kfree_skb_partial(fp, stolen);
                        } else {
                                fp->prev = NULL;
                                memset(&fp->rbnode, 0, sizeof(fp->rbnode));
                                fp->sk = NULL;

                                head->data_len += fp->len;
                                head->len += fp->len;
                                head->truesize += fp->truesize;

                                *nextp = fp;
                                nextp = &fp->next;
                        }

                        fp = next_frag;
                }
                /* Move to the next run. */
                if (rbn) {
                        struct rb_node *rbnext = rb_next(rbn);

                        fp = rb_to_skb(rbn);
                        rb_erase(rbn, &q->rb_fragments);
                        rbn = rbnext;
                }
        }
        sub_frag_mem_limit(q->fqdir, sum_truesize);

        *nextp = NULL;
        skb_mark_not_on_list(head);
        head->prev = NULL;
        head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

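/* Detach and return the first fragment of the queue. If the head of the
 * first run has a successor (next_frag), that skb takes over the head's
 * slot in the rb-tree; otherwise the node is erased. Memory accounting
 * is reduced by the detached skb's truesize.
 */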
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
        struct sk_buff *head, *skb;

        head = skb_rb_first(&q->rb_fragments);
        if (!head)
                return NULL;
        skb = FRAG_CB(head)->next_frag;
        if (skb)
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
        else
                rb_erase(&head->rbnode, &q->rb_fragments);
        memset(&head->rbnode, 0, sizeof(head->rbnode));
        barrier();

        if (head == q->fragments_tail)
                q->fragments_tail = NULL;

        sub_frag_mem_limit(q->fqdir, head->truesize);

        return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);