#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
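
/* Illustrative note (not part of the original header): these sysctls feed
 * the memory accounting helpers below. A sketch of the intended usage,
 * assuming the eviction behaviour of this era:
 *
 *	if (frag_mem_limit(nf) > nf->high_thresh)
 *		... refuse the new queue; the eviction worker then
 *		    frees old queues until usage falls toward low_thresh ...
 *
 * "timeout" is the per-netns reassembly timeout armed on each queue's
 * timer (e.g. ipfrag_time for IPv4).
 */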

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;		/* when will this queue expire? */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;		/* total length of orig datagram */
	int			meat;
	__u8			last_in;	/* first/last segment arrived? */

#define INET_FRAG_EVICTED	8
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};
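
/* Illustrative sketch (not part of the original header): a datagram is
 * complete once both the first and last fragments have arrived and the
 * accumulated payload ("meat") covers the original length. The
 * reassemblers test roughly:
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		... reassemble and deliver ...
 */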

#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       (SKB_TRUESIZE(0) + sizeof(struct ipq or struct frag_queue)),
 *	       rounded up
 */
#define INETFRAGS_MAXDEPTH	128
struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};
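
/* Illustrative sketch: buckets are locked individually, so operations on
 * different chains proceed in parallel. A lookup under the per-bucket
 * lock looks roughly like:
 *
 *	struct inet_frag_bucket *hb = &f->hash[hash & (INETFRAGS_HASHSZ - 1)];
 *
 *	spin_lock(&hb->chain_lock);
 *	hlist_for_each_entry(q, &hb->chain, list)
 *		if (q->net == nf && f->match(q, key))
 *			... found; take a reference ...
 *	spin_unlock(&hb->chain_lock);
 */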

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;
	unsigned int		next_bucket;
	unsigned long		last_rebuild_jiffies;
	bool			rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect
	 * when it needs to re-look up which hash chain to use.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
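
/* Illustrative sketch of how a protocol wires up the callbacks; the IPv4
 * reassembler (net/ipv4/ip_fragment.c) does roughly the following at
 * init time:
 *
 *	ip4_frags.hashfn	= ip4_hashfn;
 *	ip4_frags.match		= ip4_frag_match;
 *	ip4_frags.constructor	= ip4_frag_init;
 *	ip4_frags.destructor	= ip4_frag_free;
 *	ip4_frags.qsize		= sizeof(struct ipq);
 *	ip4_frags.frag_expire	= ip_expire;
 *	inet_frags_init(&ip4_frags);
 */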

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}
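
/* Illustrative usage: inet_frag_find() returns a queue with an elevated
 * refcount (allocating one if no match exists), so every lookup is paired
 * with inet_frag_put(). Sketched after the IPv4 caller:
 *
 *	q = inet_frag_find(nf, f, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	spin_lock(&q->lock);
 *	... enqueue the fragment ...
 *	spin_unlock(&q->lock);
 *	inet_frag_put(q, f);
 */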

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}
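
/* Illustrative usage: callers account each queued skb's truesize against
 * the per-netns counter, e.g. on enqueue in the IPv4 reassembler:
 *
 *	add_frag_mem_limit(&qp->q, skb->truesize);
 *
 * with a matching sub_frag_mem_limit() when fragments are freed or the
 * queue is destroyed.
 */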

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

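	/* Summing takes the counter's internal lock, which writers also
	 * take from softirq context; BHs are disabled here so a softirq
	 * on this CPU cannot deadlock against the sum (an assumption
	 * based on the percpu_counter locking rules).
	 */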
	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
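
/* Illustrative usage at reassembly time: the OR-accumulated flags index
 * the table; entries of 0xff mark invalid mixes (e.g. Not-ECT together
 * with CE) and cause the datagram to be dropped:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		... drop ...
 */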

#endif /* __NET_FRAG_H__ */