/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/rhashtable-types.h>
#include <linux/completion.h>

/* Per netns frag queues directory */
struct fqdir {
	/* sysctls */
	long			high_thresh;
	long			low_thresh;
	int			timeout;
	int			max_dist;
	struct inet_frags	*f;
	struct net		*net;
	bool			dead;

	struct rhashtable	rhashtable ____cacheline_aligned_in_smp;

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;
	struct work_struct	destroy_work;
	struct llist_node	free_list;
};

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
	INET_FRAG_HASH_DEAD	= BIT(3),
};
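
/*
 * Illustrative sketch (not a verbatim copy of any protocol handler):
 * reassembly code typically tests FIRST_IN and LAST_IN together with
 * @meat and @len to decide whether the datagram is complete, in the
 * style of ip_frag_queue() in net/ipv4/ip_fragment.c; reassemble() is
 * a hypothetical helper standing in for the protocol's reasm function:
 *
 *	if (q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len) {
 *		// every byte of the original datagram has arrived
 *		err = reassemble(q);
 *	}
 */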

struct frag_v4_compare_key {
	__be32		saddr;
	__be32		daddr;
	u32		user;
	u32		vif;
	__be16		id;
	u16		protocol;
};

struct frag_v6_compare_key {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	u32		user;
	__be32		id;
	u32		iif;
};
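
/*
 * Illustrative sketch (hedged; the field assignments mirror what
 * net/ipv4/ip_fragment.c does, but iph, vif and fqdir are assumed
 * locals of the caller): a lookup key is filled from the IP header
 * and handed to inet_frag_find(), declared later in this file:
 *
 *	struct frag_v4_compare_key key = {
 *		.saddr		= iph->saddr,
 *		.daddr		= iph->daddr,
 *		.user		= IP_DEFRAG_LOCAL_DELIVER,
 *		.vif		= vif,
 *		.id		= iph->id,
 *		.protocol	= iph->protocol,
 *	};
 *	struct inet_frag_queue *q = inet_frag_find(fqdir, &key);
 */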

/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: key identifying this frag
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @rb_fragments: received fragments rb-tree root
 * @fragments_tail: received fragments tail
 * @last_run_head: the head of the last "run"; see ip_fragment.c
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @fqdir: pointer to struct fqdir
 * @rcu: rcu head for deferred freeing
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	refcount_t		refcnt;
	struct rb_root		rb_fragments;
	struct sk_buff		*fragments_tail;
	struct sk_buff		*last_run_head;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct fqdir		*fqdir;
	struct rcu_head		rcu;
};

struct inet_frags {
	unsigned int		qsize;

	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*frag_expire)(struct timer_list *t);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
	struct rhashtable_params rhash_params;
	refcount_t		refcnt;
	struct completion	completion;
};

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);

static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
	/* Prevent creation of new frags.
	 * Pairs with READ_ONCE() in inet_frag_find().
	 */
	WRITE_ONCE(fqdir->high_thresh, 0);

	/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
	 * and ip6frag_expire_frag_queue().
	 */
	WRITE_ONCE(fqdir->dead, true);
}
void fqdir_exit(struct fqdir *fqdir);
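
/*
 * Illustrative sketch of the intended teardown order (a sketch assuming a
 * pernet_operations user such as net/ipv4/ip_fragment.c, with hypothetical
 * example_* names; not a definitive implementation): fqdir_pre_exit() runs
 * from the netns ->pre_exit() hook so in-flight lookups stop creating new
 * frags, then fqdir_exit() runs from ->exit() to free the hash table and
 * any remaining queues:
 *
 *	static void __net_exit example_pre_exit_net(struct net *net)
 *	{
 *		fqdir_pre_exit(net->ipv4.fqdir);
 *	}
 *
 *	static void __net_exit example_exit_net(struct net *net)
 *	{
 *		fqdir_exit(net->ipv4.fqdir);
 *	}
 */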

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root);

static inline void inet_frag_put(struct inet_frag_queue *q)
{
	if (refcount_dec_and_test(&q->refcnt))
		inet_frag_destroy(q);
}
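
/*
 * Illustrative sketch (hedged): inet_frag_find() returns a queue with an
 * elevated refcount, so every successful lookup must be balanced by an
 * inet_frag_put() once the caller is done with the queue:
 *
 *	q = inet_frag_find(fqdir, &key);
 *	if (q) {
 *		spin_lock(&q->lock);
 *		// ... queue or reassemble the fragment ...
 *		spin_unlock(&q->lock);
 *		inet_frag_put(q);
 *	}
 */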

/* Memory Tracking Functions. */

static inline long frag_mem_limit(const struct fqdir *fqdir)
{
	return atomic_long_read(&fqdir->mem);
}

static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_sub(val, &fqdir->mem);
}

static inline void add_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_add(val, &fqdir->mem);
}
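
/*
 * Illustrative sketch of the usual accounting pattern (a sketch, not a
 * definitive implementation): callers charge each queued skb's truesize
 * against the fqdir and release it when the skb leaves the queue; new
 * queues are refused once frag_mem_limit() exceeds @high_thresh (see the
 * READ_ONCE() check in inet_frag_find()):
 *
 *	add_frag_mem_limit(q->fqdir, skb->truesize);	// on enqueue
 *	...
 *	sub_frag_mem_limit(q->fqdir, sum_truesize);	// on reasm or free
 */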

/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
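
/*
 * Illustrative sketch (hedged; mirrors the pattern used by
 * net/ipv4/ip_fragment.c, not a verbatim copy): each fragment ORs its
 * ip4_frag_ecn() bit into the queue's accumulator, and at reassembly
 * time the accumulated value indexes ip_frag_ecn_table[]; a table entry
 * of 0xff marks an invalid combination and the datagram is dropped:
 *
 *	qp->ecn |= ip4_frag_ecn(iph->tos);	// per received fragment
 *	...
 *	ecn = ip_frag_ecn_table[qp->ecn];	// at reassembly time
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;			// invalid ECN mix
 */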

/* Return values of inet_frag_queue_insert() */
#define IPFRAG_OK	0
#define IPFRAG_DUP	1
#define IPFRAG_OVERLAP	2
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
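
/*
 * Illustrative sketch of the intended call sequence (assumptions noted:
 * offset/end/prev_tail are the caller's locals, and ip_frag_queue() in
 * net/ipv4/ip_fragment.c is the reference user; error handling is
 * abbreviated):
 *
 *	err = inet_frag_queue_insert(q, skb, offset, end);
 *	if (err == IPFRAG_DUP)
 *		goto discard;		// exact duplicate, drop the skb
 *	if (err == IPFRAG_OVERLAP)
 *		goto drop_queue;	// overlap: kill the whole queue
 *
 *	// once all fragments are in (FIRST_IN|LAST_IN, meat == len):
 *	reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);
 *	if (reasm_data)
 *		inet_frag_reasm_finish(q, skb, reasm_data, true);
 */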

#endif