Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 2 | #ifndef __NET_FRAG_H__ |
| 3 | #define __NET_FRAG_H__ |
| 4 | |
NeilBrown | 0eb71a9 | 2018-06-18 12:52:50 +1000 | [diff] [blame] | 5 | #include <linux/rhashtable-types.h> |
Eric Dumazet | dc93f46 | 2019-05-27 16:56:49 -0700 | [diff] [blame] | 6 | #include <linux/completion.h> |
Eric Dumazet | 648700f | 2018-03-31 12:58:49 -0700 | [diff] [blame] | 7 | |
/* Per netns frag queues directory */
struct fqdir {
	/* sysctls */
	long			high_thresh;	/* set to 0 by fqdir_pre_exit() to refuse new frags */
	long			low_thresh;
	int			timeout;	/* frag queue lifetime */
	int			max_dist;	/* NOTE(review): presumably the ipfrag_max_dist sysctl -- confirm */
	struct inet_frags	*f;		/* protocol ops this directory belongs to */
	struct net		*net;		/* owning netns */
	bool			dead;		/* written by fqdir_pre_exit(); readers use READ_ONCE() */

	struct rhashtable       rhashtable ____cacheline_aligned_in_smp;

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;
	struct work_struct	destroy_work;	/* deferred destruction (see fqdir_exit()) */
	struct llist_node	free_list;	/* batching of fqdirs to free */
};
| 26 | |
/**
 * fragment queue flags, stored in inet_frag_queue::flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
	INET_FRAG_HASH_DEAD	= BIT(3),
};
| 41 | |
/* IPv4 lookup key for the rhashtable (see inet_frag_queue::key).
 * Field layout is part of the hashed key -- do not reorder.
 */
struct frag_v4_compare_key {
	__be32		saddr;
	__be32		daddr;
	u32		user;		/* defragmentation user (netfilter, local delivery, ...) -- TODO confirm */
	u32		vif;
	__be16		id;		/* IP identification field */
	u16		protocol;
};
| 50 | |
/* IPv6 lookup key for the rhashtable (see inet_frag_queue::key).
 * Field layout is part of the hashed key -- do not reorder.
 */
struct frag_v6_compare_key {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	u32		user;		/* defragmentation user -- TODO confirm */
	__be32		id;		/* fragment header identification */
	u32		iif;		/* incoming interface index */
};
| 58 | |
/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: keys identifying this frag.
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @rb_fragments: received fragments rb-tree root
 * @fragments_tail: received fragments tail
 * @last_run_head: the head of the last "run". see ip_fragment.c
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @fqdir: pointer to struct fqdir
 * @rcu: rcu head for freeing deferral
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	refcount_t		refcnt;
	struct rb_root		rb_fragments;
	struct sk_buff		*fragments_tail;
	struct sk_buff		*last_run_head;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;		/* INET_FRAG_* bits */
	u16			max_size;
	struct fqdir		*fqdir;
	struct rcu_head		rcu;
};
| 98 | |
/* Per-protocol (IPv4, IPv6, netfilter, ...) fragment handling descriptor. */
struct inet_frags {
	unsigned int		qsize;		/* NOTE(review): presumably the per-queue allocation
						 * size backing frags_cachep -- confirm in inet_frag.c
						 */

	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);	/* protocol init of a new queue */
	void			(*destructor)(struct inet_frag_queue *);	/* protocol teardown hook */
	void			(*frag_expire)(struct timer_list *t);	/* queue timer callback */
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
	struct rhashtable_params rhash_params;
	refcount_t		refcnt;		/* NOTE(review): refcnt/completion appear to let
						 * inet_frags_fini() wait out in-flight fqdir
						 * destruction -- confirm against inet_frag.c
						 */
	struct completion	completion;
};
| 112 | |
/* Register / unregister a fragment protocol descriptor. */
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Set up a per-netns fqdir for protocol @f; on success *@fqdirp is valid. */
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
Eric Dumazet | d5dd887 | 2019-06-18 11:09:00 -0700 | [diff] [blame] | 117 | |
/* First stage of fqdir teardown: stop new frag queues from being created
 * and mark the directory dead, meant to run before fqdir_exit().
 */
static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
	/* Prevent creation of new frags.
	 * Pairs with READ_ONCE() in inet_frag_find().
	 */
	WRITE_ONCE(fqdir->high_thresh, 0);

	/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
	 * and ip6frag_expire_frag_queue().
	 */
	WRITE_ONCE(fqdir->dead, true);
}
/* Final stage of fqdir teardown (see fqdir_pre_exit() above). */
void fqdir_exit(struct fqdir *fqdir);

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
/* Look up (or create) the queue matching @key in @fqdir. */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root);
| 138 | |
Eric Dumazet | 093ba72 | 2018-03-31 12:58:44 -0700 | [diff] [blame] | 139 | static inline void inet_frag_put(struct inet_frag_queue *q) |
Pavel Emelyanov | 762cc40 | 2007-10-15 02:41:56 -0700 | [diff] [blame] | 140 | { |
Reshetova, Elena | edcb691 | 2017-06-30 13:08:07 +0300 | [diff] [blame] | 141 | if (refcount_dec_and_test(&q->refcnt)) |
Eric Dumazet | 093ba72 | 2018-03-31 12:58:44 -0700 | [diff] [blame] | 142 | inet_frag_destroy(q); |
Pavel Emelyanov | 762cc40 | 2007-10-15 02:41:56 -0700 | [diff] [blame] | 143 | } |
| 144 | |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 145 | /* Memory Tracking Functions. */ |
| 146 | |
Eric Dumazet | 6ce3b4d | 2019-05-24 09:03:30 -0700 | [diff] [blame] | 147 | static inline long frag_mem_limit(const struct fqdir *fqdir) |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 148 | { |
Eric Dumazet | 6ce3b4d | 2019-05-24 09:03:30 -0700 | [diff] [blame] | 149 | return atomic_long_read(&fqdir->mem); |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 150 | } |
| 151 | |
Eric Dumazet | 6ce3b4d | 2019-05-24 09:03:30 -0700 | [diff] [blame] | 152 | static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val) |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 153 | { |
Eric Dumazet | 6ce3b4d | 2019-05-24 09:03:30 -0700 | [diff] [blame] | 154 | atomic_long_sub(val, &fqdir->mem); |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 155 | } |
| 156 | |
Eric Dumazet | 6ce3b4d | 2019-05-24 09:03:30 -0700 | [diff] [blame] | 157 | static inline void add_frag_mem_limit(struct fqdir *fqdir, long val) |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 158 | { |
Eric Dumazet | 6ce3b4d | 2019-05-24 09:03:30 -0700 | [diff] [blame] | 159 | atomic_long_add(val, &fqdir->mem); |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 160 | } |
| 161 | |
/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

/* Table indexed by the OR-ed IPFRAG_ECN_* bits of all fragments. */
extern const u8 ip_frag_ecn_table[16];
| 172 | |
/* Return values of inet_frag_queue_insert() */
#define IPFRAG_OK	0	/* fragment inserted */
#define IPFRAG_DUP	1	/* duplicate fragment, dropped */
#define IPFRAG_OVERLAP	2	/* fragment overlaps previously received data */
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
| 184 | |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 185 | #endif |