// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

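/* Helpers for the byte range of a mask that is actually set:
 * fl_mask_update_range() records the first and last non-zero bytes of the
 * mask, rounded out to long boundaries, so that the lookup helpers below
 * only hash and compare that slice of struct fl_flow_key.
 */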
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

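/* Port-range filters cannot be resolved by the hash lookup alone: the two
 * helpers below compare the packet's port against the filter's min/max
 * values and, on a hit, copy the filter's masked min/max ports into the
 * lookup key so the subsequent rhashtable lookup can match.
 */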
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

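/* Classification walks every mask in the order it was added: for each mask
 * the skb is dissected into a masked key and looked up in that mask's
 * rhashtable; the first matching filter that is not skip_sw has its
 * actions executed.
 */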
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = qdisc_skb_cb(skb)->post_ct;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

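/* Hardware offload helpers: each one builds a struct flow_cls_offload
 * command and hands it to the block callbacks (tc_setup_cb_*) so drivers
 * can destroy, install or read stats for the filter in hardware.
 */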
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);

}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.drops,
			      cls_flower.stats.lastused,
			      cls_flower.stats.used_hw_stats,
			      cls_flower.stats.used_hw_stats_valid);
}

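/* Filters are reference counted; the final __fl_put() frees the filter,
 * deferring the actual free to a workqueue via tcf_queue_work() when the
 * filter's exts still hold a reference to their netns.
 */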
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

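/* Netlink policy for the TCA_FLOWER_* attributes. Each match key has a
 * value attribute and usually a *_MASK companion; both are validated here
 * before the key-parsing helpers below copy them into struct fl_flow_key.
 * A typical user-space request exercising a few of these attributes would
 * be something like:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */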
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },

};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

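/* Copy one value/mask pair from the netlink attributes into the key and
 * mask structures. If no mask attribute was supplied, an all-ones mask is
 * used, i.e. the value is treated as an exact match.
 */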
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

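/* Parse the MPLS match keys. The nested TCA_FLOWER_KEY_MPLS_OPTS form can
 * address any label stack entry by depth and may not be mixed with the
 * older single-LSE attributes (TTL/BOS/TC/LABEL), which implicitly apply
 * to the first entry of the stack.
 */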
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

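/* Tunnel option parsers. Each fl_set_*_opt() helper appends one option to
 * key->enc_opts.data and returns the number of bytes consumed. The buffer
 * is prefilled with all-ones, so a missing mask (a depth of 0, checked
 * right after the prefill) falls back to an exact match.
 */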
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
1096 if (!depth)
1097 return sizeof(struct geneve_opt) + data_len;
1098
1099 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1100 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1101 return -EINVAL;
1102 }
1103
Johannes Berg8cb08172019-04-26 14:07:28 +02001104 err = nla_parse_nested_deprecated(tb,
1105 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1106 nla, geneve_opt_policy, extack);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001107 if (err < 0)
1108 return err;
1109
1110 /* We are not allowed to omit any of CLASS, TYPE or DATA
1111 * fields from the key.
1112 */
1113 if (!option_len &&
1114 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1115 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1116 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1117 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1118 return -EINVAL;
1119 }
1120
1121 /* Omitting any of CLASS, TYPE or DATA fields is allowed
1122 * for the mask.
1123 */
1124 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1125 int new_len = key->enc_opts.len;
1126
1127 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1128 data_len = nla_len(data);
1129 if (data_len < 4) {
1130 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1131 return -ERANGE;
1132 }
1133 if (data_len % 4) {
1134 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1135 return -ERANGE;
1136 }
1137
1138 new_len += sizeof(struct geneve_opt) + data_len;
1139 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1140 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1141 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1142 return -ERANGE;
1143 }
1144 opt->length = data_len / 4;
1145 memcpy(opt->opt_data, nla_data(data), data_len);
1146 }
1147
1148 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1149 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1150 opt->opt_class = nla_get_be16(class);
1151 }
1152
1153 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1154 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1155 opt->type = nla_get_u8(type);
1156 }
1157
1158 return sizeof(struct geneve_opt) + data_len;
1159}
1160
Xin Longd8f9dfa2019-11-21 18:03:28 +08001161static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1162 int depth, int option_len,
1163 struct netlink_ext_ack *extack)
1164{
1165 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1166 struct vxlan_metadata *md;
1167 int err;
1168
1169 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1170 memset(md, 0xff, sizeof(*md));
1171
1172 if (!depth)
1173 return sizeof(*md);
1174
1175 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1176 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1177 return -EINVAL;
1178 }
1179
1180 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1181 vxlan_opt_policy, extack);
1182 if (err < 0)
1183 return err;
1184
1185 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1186 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1187 return -EINVAL;
1188 }
1189
Xin Long13e6ce92020-09-13 19:51:50 +08001190 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
Xin Longd8f9dfa2019-11-21 18:03:28 +08001191 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
Xin Long13e6ce92020-09-13 19:51:50 +08001192 md->gbp &= VXLAN_GBP_MASK;
1193 }
Xin Longd8f9dfa2019-11-21 18:03:28 +08001194
1195 return sizeof(*md);
1196}
1197
Xin Long79b10112019-11-21 18:03:29 +08001198static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1199 int depth, int option_len,
1200 struct netlink_ext_ack *extack)
1201{
1202 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1203 struct erspan_metadata *md;
1204 int err;
1205
1206 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1207 memset(md, 0xff, sizeof(*md));
1208 md->version = 1;
1209
1210 if (!depth)
1211 return sizeof(*md);
1212
1213 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1214 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1215 return -EINVAL;
1216 }
1217
1218 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1219 erspan_opt_policy, extack);
1220 if (err < 0)
1221 return err;
1222
1223 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1224 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1225 return -EINVAL;
1226 }
1227
1228 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1229 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1230
1231 if (md->version == 1) {
1232 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1233 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1234 return -EINVAL;
1235 }
1236 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1237 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
Xin Long8e1b3ac2020-09-13 19:43:03 +08001238 memset(&md->u, 0x00, sizeof(md->u));
Xin Long79b10112019-11-21 18:03:29 +08001239 md->u.index = nla_get_be32(nla);
1240 }
1241 } else if (md->version == 2) {
1242 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1243 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1244 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1245 return -EINVAL;
1246 }
1247 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1248 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1249 md->u.md2.dir = nla_get_u8(nla);
1250 }
1251 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1252 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1253 set_hwid(&md->u.md2, nla_get_u8(nla));
1254 }
1255 } else {
1256 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1257 return -EINVAL;
1258 }
1259
1260 return sizeof(*md);
1261}
1262
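/* Walk the TCA_FLOWER_KEY_ENC_OPTS nest and, if supplied, the matching
 * TCA_FLOWER_KEY_ENC_OPTS_MASK nest, dispatching each option to the
 * geneve/vxlan/erspan helpers above.  Key and mask options are parsed
 * in lock-step and must end up with the same accumulated length.
 */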
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001263static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1264 struct fl_flow_key *mask,
1265 struct netlink_ext_ack *extack)
1266{
1267 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
Jakub Kicinski63c82992018-11-09 21:06:26 -08001268 int err, option_len, key_depth, msk_depth = 0;
1269
Johannes Berg8cb08172019-04-26 14:07:28 +02001270 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1271 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1272 enc_opts_policy, extack);
Jakub Kicinski63c82992018-11-09 21:06:26 -08001273 if (err)
1274 return err;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001275
1276 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1277
1278 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
Johannes Berg8cb08172019-04-26 14:07:28 +02001279 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1280 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1281 enc_opts_policy, extack);
Jakub Kicinski63c82992018-11-09 21:06:26 -08001282 if (err)
1283 return err;
1284
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001285 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1286 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
Cong Wangc96adff2021-01-15 10:50:24 -08001287 if (!nla_ok(nla_opt_msk, msk_depth)) {
1288 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1289 return -EINVAL;
1290 }
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001291 }
1292
1293 nla_for_each_attr(nla_opt_key, nla_enc_key,
1294 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1295 switch (nla_type(nla_opt_key)) {
1296 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
Xin Longd8f9dfa2019-11-21 18:03:28 +08001297 if (key->enc_opts.dst_opt_type &&
1298 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1299 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1300 return -EINVAL;
1301 }
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001302 option_len = 0;
1303 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1304 option_len = fl_set_geneve_opt(nla_opt_key, key,
1305 key_depth, option_len,
1306 extack);
1307 if (option_len < 0)
1308 return option_len;
1309
1310 key->enc_opts.len += option_len;
1311			/* The mask is parsed in lock-step with the key so that
1312			 * the key and mask attribute lengths can be verified to match.
1313			 */
1314 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1315 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1316 msk_depth, option_len,
1317 extack);
1318 if (option_len < 0)
1319 return option_len;
1320
1321 mask->enc_opts.len += option_len;
1322 if (key->enc_opts.len != mask->enc_opts.len) {
1323				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1324 return -EINVAL;
1325 }
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001326 break;
Xin Longd8f9dfa2019-11-21 18:03:28 +08001327 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1328 if (key->enc_opts.dst_opt_type) {
1329 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1330 return -EINVAL;
1331 }
1332 option_len = 0;
1333 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1334 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1335 key_depth, option_len,
1336 extack);
1337 if (option_len < 0)
1338 return option_len;
1339
1340 key->enc_opts.len += option_len;
1341			/* The mask is parsed in lock-step with the key so that
1342			 * the key and mask attribute lengths can be verified to match.
1343			 */
1344 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1345 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1346 msk_depth, option_len,
1347 extack);
1348 if (option_len < 0)
1349 return option_len;
1350
1351 mask->enc_opts.len += option_len;
1352 if (key->enc_opts.len != mask->enc_opts.len) {
1353				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1354 return -EINVAL;
1355 }
Xin Longd8f9dfa2019-11-21 18:03:28 +08001356 break;
Xin Long79b10112019-11-21 18:03:29 +08001357 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1358 if (key->enc_opts.dst_opt_type) {
1359 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1360 return -EINVAL;
1361 }
1362 option_len = 0;
1363 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1364 option_len = fl_set_erspan_opt(nla_opt_key, key,
1365 key_depth, option_len,
1366 extack);
1367 if (option_len < 0)
1368 return option_len;
1369
1370 key->enc_opts.len += option_len;
1371			/* The mask is parsed in lock-step with the key so that
1372			 * the key and mask attribute lengths can be verified to match.
1373			 */
1374 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1375 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1376 msk_depth, option_len,
1377 extack);
1378 if (option_len < 0)
1379 return option_len;
1380
1381 mask->enc_opts.len += option_len;
1382 if (key->enc_opts.len != mask->enc_opts.len) {
1383				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1384 return -EINVAL;
1385 }
Xin Long79b10112019-11-21 18:03:29 +08001386 break;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001387 default:
1388 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1389 return -EINVAL;
1390 }
Cong Wangc96adff2021-01-15 10:50:24 -08001391
1392 if (!msk_depth)
1393 continue;
1394
1395 if (!nla_ok(nla_opt_msk, msk_depth)) {
1396 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1397 return -EINVAL;
1398 }
1399 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001400 }
1401
1402 return 0;
1403}
1404
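/* Reject nonsensical conntrack state combinations: every other flag
 * requires +trk, +new and +est are mutually exclusive, +inv may only be
 * combined with +trk, and +new and +rpl are mutually exclusive.
 */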
wenxu1bcc51a2021-02-09 14:37:49 +08001405static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1406 struct netlink_ext_ack *extack)
1407{
1408 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1409 NL_SET_ERR_MSG_ATTR(extack, tb,
1410 "no trk, so no other flag can be set");
1411 return -EINVAL;
1412 }
1413
1414 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1415 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1416 NL_SET_ERR_MSG_ATTR(extack, tb,
1417 "new and est are mutually exclusive");
1418 return -EINVAL;
1419 }
1420
wenxu3aed8b62021-02-23 15:11:55 +08001421 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1422 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1423 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1424 NL_SET_ERR_MSG_ATTR(extack, tb,
1425 "when inv is set, only trk may be set");
1426 return -EINVAL;
1427 }
1428
1429 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1430 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1431 NL_SET_ERR_MSG_ATTR(extack, tb,
1432 "new and rpl are mutually exclusive");
1433 return -EINVAL;
1434 }
1435
wenxu1bcc51a2021-02-09 14:37:49 +08001436 return 0;
1437}
1438
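/* Fill the conntrack part of the flow key (state, zone, mark, labels).
 * Each field is accepted only when the corresponding conntrack support
 * is compiled in, and the masked state is checked with
 * fl_validate_ct_state() above.
 */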
Paul Blakeye0ace682019-07-09 10:30:50 +03001439static int fl_set_key_ct(struct nlattr **tb,
1440 struct flow_dissector_key_ct *key,
1441 struct flow_dissector_key_ct *mask,
1442 struct netlink_ext_ack *extack)
1443{
1444 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
wenxu1bcc51a2021-02-09 14:37:49 +08001445 int err;
1446
Paul Blakeye0ace682019-07-09 10:30:50 +03001447 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1448 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1449 return -EOPNOTSUPP;
1450 }
1451 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1452 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1453 sizeof(key->ct_state));
wenxu1bcc51a2021-02-09 14:37:49 +08001454
wenxuafa536d2021-03-17 12:02:43 +08001455 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
wenxu1bcc51a2021-02-09 14:37:49 +08001456 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1457 extack);
1458 if (err)
1459 return err;
1460
Paul Blakeye0ace682019-07-09 10:30:50 +03001461 }
1462 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1463 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1464 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1465 return -EOPNOTSUPP;
1466 }
1467 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1468 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1469 sizeof(key->ct_zone));
1470 }
1471 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1472 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1473 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1474 return -EOPNOTSUPP;
1475 }
1476 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1477 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1478 sizeof(key->ct_mark));
1479 }
1480 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1481 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1482 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1483 return -EOPNOTSUPP;
1484 }
1485 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1486 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1487 sizeof(key->ct_labels));
1488 }
1489
1490 return 0;
1491}
1492
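/* Central netlink-to-flow-key translation: convert the TCA_FLOWER_*
 * attributes in tb[] into @key and the matching @mask.  Fields that are
 * not supplied keep an all-zero mask and are therefore wildcarded.
 */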
Jiri Pirko77b99002015-05-12 14:56:21 +02001493static int fl_set_key(struct net *net, struct nlattr **tb,
Alexander Aring1057c552018-01-18 11:20:54 -05001494 struct fl_flow_key *key, struct fl_flow_key *mask,
1495 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001496{
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001497 __be16 ethertype;
Or Gerlitzd9724772016-12-22 14:28:15 +02001498 int ret = 0;
Jiri Pirkoa5148622019-06-15 11:03:49 +02001499
Jiri Pirko77b99002015-05-12 14:56:21 +02001500 if (tb[TCA_FLOWER_INDEV]) {
Alexander Aring1057c552018-01-18 11:20:54 -05001501 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001502 if (err < 0)
1503 return err;
Jiri Pirko8212ed72019-06-19 09:41:03 +03001504 key->meta.ingress_ifindex = err;
1505 mask->meta.ingress_ifindex = 0xffffffff;
Jiri Pirko77b99002015-05-12 14:56:21 +02001506 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001507
1508 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1509 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1510 sizeof(key->eth.dst));
1511 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1512 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1513 sizeof(key->eth.src));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001514
Arnd Bergmann0b498a52016-08-26 17:25:45 +02001515 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001516 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1517
Jianbo Liuaaab0832018-07-06 05:38:13 +00001518 if (eth_type_vlan(ethertype)) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001519 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1520 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1521 &mask->vlan);
1522
Jianbo Liu5e9a0fe2018-07-09 02:26:20 +00001523 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1524 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1525 if (eth_type_vlan(ethertype)) {
1526 fl_set_key_vlan(tb, ethertype,
1527 TCA_FLOWER_KEY_CVLAN_ID,
1528 TCA_FLOWER_KEY_CVLAN_PRIO,
1529 &key->cvlan, &mask->cvlan);
1530 fl_set_key_val(tb, &key->basic.n_proto,
1531 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1532 &mask->basic.n_proto,
1533 TCA_FLOWER_UNSPEC,
1534 sizeof(key->basic.n_proto));
1535 } else {
1536 key->basic.n_proto = ethertype;
1537 mask->basic.n_proto = cpu_to_be16(~0);
1538 }
Jianbo Liud64efd02018-07-06 05:38:16 +00001539 }
Arnd Bergmann0b498a52016-08-26 17:25:45 +02001540 } else {
1541 key->basic.n_proto = ethertype;
1542 mask->basic.n_proto = cpu_to_be16(~0);
1543 }
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001544 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001545
Jiri Pirko77b99002015-05-12 14:56:21 +02001546 if (key->basic.n_proto == htons(ETH_P_IP) ||
1547 key->basic.n_proto == htons(ETH_P_IPV6)) {
1548 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1549 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1550 sizeof(key->basic.ip_proto));
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001551 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
Jiri Pirko77b99002015-05-12 14:56:21 +02001552 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001553
1554 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1555 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001556 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001557 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1558 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1559 sizeof(key->ipv4.src));
1560 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1561 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1562 sizeof(key->ipv4.dst));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001563 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1564 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001565 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001566 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1567 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1568 sizeof(key->ipv6.src));
1569 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1570 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1571 sizeof(key->ipv6.dst));
1572 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001573
Jiri Pirko77b99002015-05-12 14:56:21 +02001574 if (key->basic.ip_proto == IPPROTO_TCP) {
1575 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001576 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001577 sizeof(key->tp.src));
1578 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001579 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001580 sizeof(key->tp.dst));
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001581 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1582 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1583 sizeof(key->tcp.flags));
Jiri Pirko77b99002015-05-12 14:56:21 +02001584 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1585 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001586 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001587 sizeof(key->tp.src));
1588 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001589 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001590 sizeof(key->tp.dst));
Simon Horman5976c5f2016-11-03 13:24:21 +01001591 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1592 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1593 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1594 sizeof(key->tp.src));
1595 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1596 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1597 sizeof(key->tp.dst));
Simon Horman7b684882016-12-07 13:48:28 +01001598 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1599 key->basic.ip_proto == IPPROTO_ICMP) {
1600 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1601 &mask->icmp.type,
1602 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1603 sizeof(key->icmp.type));
1604 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1605 &mask->icmp.code,
1606 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1607 sizeof(key->icmp.code));
1608 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1609 key->basic.ip_proto == IPPROTO_ICMPV6) {
1610 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1611 &mask->icmp.type,
1612 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1613 sizeof(key->icmp.type));
Simon Horman040587a2017-01-30 16:19:02 +01001614 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
Simon Horman7b684882016-12-07 13:48:28 +01001615 &mask->icmp.code,
Simon Horman040587a2017-01-30 16:19:02 +01001616 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
Simon Horman7b684882016-12-07 13:48:28 +01001617 sizeof(key->icmp.code));
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001618 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1619 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
Guillaume Nault442f7302020-03-23 21:48:49 +01001620 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -04001621 if (ret)
1622 return ret;
Simon Horman99d31322017-01-11 14:05:43 +01001623 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1624 key->basic.n_proto == htons(ETH_P_RARP)) {
1625 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1626 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1627 sizeof(key->arp.sip));
1628 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1629 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1630 sizeof(key->arp.tip));
1631 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1632 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1633 sizeof(key->arp.op));
1634 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1635 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1636 sizeof(key->arp.sha));
1637 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1638 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1639 sizeof(key->arp.tha));
Jiri Pirko77b99002015-05-12 14:56:21 +02001640 }
1641
Amritha Nambiar5c722992018-11-12 16:15:55 -08001642 if (key->basic.ip_proto == IPPROTO_TCP ||
1643 key->basic.ip_proto == IPPROTO_UDP ||
1644 key->basic.ip_proto == IPPROTO_SCTP) {
Guillaume Naultbd7d4c12020-03-23 21:48:51 +01001645 ret = fl_set_key_port_range(tb, key, mask, extack);
Amritha Nambiar5c722992018-11-12 16:15:55 -08001646 if (ret)
1647 return ret;
1648 }
1649
Amir Vadaibc3103f2016-09-08 16:23:47 +03001650 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1651 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1652 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001653 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001654 fl_set_key_val(tb, &key->enc_ipv4.src,
1655 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1656 &mask->enc_ipv4.src,
1657 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1658 sizeof(key->enc_ipv4.src));
1659 fl_set_key_val(tb, &key->enc_ipv4.dst,
1660 TCA_FLOWER_KEY_ENC_IPV4_DST,
1661 &mask->enc_ipv4.dst,
1662 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1663 sizeof(key->enc_ipv4.dst));
1664 }
1665
1666 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1667 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1668 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001669 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001670 fl_set_key_val(tb, &key->enc_ipv6.src,
1671 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1672 &mask->enc_ipv6.src,
1673 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1674 sizeof(key->enc_ipv6.src));
1675 fl_set_key_val(tb, &key->enc_ipv6.dst,
1676 TCA_FLOWER_KEY_ENC_IPV6_DST,
1677 &mask->enc_ipv6.dst,
1678 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1679 sizeof(key->enc_ipv6.dst));
1680 }
1681
1682 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03001683 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
Amir Vadaibc3103f2016-09-08 16:23:47 +03001684 sizeof(key->enc_key_id.keyid));
1685
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001686 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1687 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1688 sizeof(key->enc_tp.src));
1689
1690 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1691 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1692 sizeof(key->enc_tp.dst));
1693
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001694 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1695
Ariel Levkovich5923b8f2020-07-23 01:03:01 +03001696 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1697 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1698 sizeof(key->hash.hash));
1699
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001700 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1701 ret = fl_set_enc_opt(tb, key, mask, extack);
1702 if (ret)
1703 return ret;
1704 }
1705
Paul Blakeye0ace682019-07-09 10:30:50 +03001706 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1707 if (ret)
1708 return ret;
1709
Or Gerlitzd9724772016-12-22 14:28:15 +02001710 if (tb[TCA_FLOWER_KEY_FLAGS])
Guillaume Naulte304e212020-03-23 21:48:53 +01001711 ret = fl_set_key_flags(tb, &key->control.flags,
1712 &mask->control.flags, extack);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001713
Or Gerlitzd9724772016-12-22 14:28:15 +02001714 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001715}
1716
Paul Blakey05cd2712018-04-30 14:28:30 +03001717static void fl_mask_copy(struct fl_flow_mask *dst,
1718 struct fl_flow_mask *src)
Jiri Pirko77b99002015-05-12 14:56:21 +02001719{
Paul Blakey05cd2712018-04-30 14:28:30 +03001720 const void *psrc = fl_key_get_start(&src->key, src);
1721 void *pdst = fl_key_get_start(&dst->key, src);
Jiri Pirko77b99002015-05-12 14:56:21 +02001722
Paul Blakey05cd2712018-04-30 14:28:30 +03001723 memcpy(pdst, psrc, fl_mask_range(src));
1724 dst->range = src->range;
Jiri Pirko77b99002015-05-12 14:56:21 +02001725}
1726
1727static const struct rhashtable_params fl_ht_params = {
1728 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1729 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1730 .automatic_shrinking = true,
1731};
1732
Paul Blakey05cd2712018-04-30 14:28:30 +03001733static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001734{
Paul Blakey05cd2712018-04-30 14:28:30 +03001735 mask->filter_ht_params = fl_ht_params;
1736 mask->filter_ht_params.key_len = fl_mask_range(mask);
1737 mask->filter_ht_params.key_offset += mask->range.start;
Jiri Pirko77b99002015-05-12 14:56:21 +02001738
Paul Blakey05cd2712018-04-30 14:28:30 +03001739 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
Jiri Pirko77b99002015-05-12 14:56:21 +02001740}
1741
1742#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
Pankaj Bharadiyac5936422019-12-09 10:31:43 -08001743#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
Jiri Pirko77b99002015-05-12 14:56:21 +02001744
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001745#define FL_KEY_IS_MASKED(mask, member) \
1746 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1747 0, FL_KEY_MEMBER_SIZE(member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001748
1749#define FL_KEY_SET(keys, cnt, id, member) \
1750 do { \
1751 keys[cnt].key_id = id; \
1752 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1753 cnt++; \
1754 } while(0);
1755
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001756#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001757 do { \
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001758 if (FL_KEY_IS_MASKED(mask, member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001759 FL_KEY_SET(keys, cnt, id, member); \
1760 } while(0);
1761
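/* Build the flow dissector used with this mask: CONTROL and BASIC are
 * always dissected, every other key is included only if at least one of
 * its bits is set in the mask.
 */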
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001762static void fl_init_dissector(struct flow_dissector *dissector,
1763 struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001764{
1765 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1766 size_t cnt = 0;
1767
Jiri Pirko8212ed72019-06-19 09:41:03 +03001768 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1769 FLOW_DISSECTOR_KEY_META, meta);
Tom Herbert42aecaa2015-06-04 09:16:39 -07001770 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
Jiri Pirko77b99002015-05-12 14:56:21 +02001771 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001772 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001773 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001774 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001775 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001776 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001777 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09001778 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1779 FLOW_DISSECTOR_KEY_PORTS, tp);
1780 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1781 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001782 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001783 FLOW_DISSECTOR_KEY_IP, ip);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001784 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001785 FLOW_DISSECTOR_KEY_TCP, tcp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001786 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman7b684882016-12-07 13:48:28 +01001787 FLOW_DISSECTOR_KEY_ICMP, icmp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001788 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman99d31322017-01-11 14:05:43 +01001789 FLOW_DISSECTOR_KEY_ARP, arp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001790 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001791 FLOW_DISSECTOR_KEY_MPLS, mpls);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001792 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001793 FLOW_DISSECTOR_KEY_VLAN, vlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001794 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jianbo Liud64efd02018-07-06 05:38:16 +00001795 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001796 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001797 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001798 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001799 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001800 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001801 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001802 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1803 FL_KEY_IS_MASKED(mask, enc_ipv6))
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001804 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1805 enc_control);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001806 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001807 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001808 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001809 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001810 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1811 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
Paul Blakeye0ace682019-07-09 10:30:50 +03001812 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1813 FLOW_DISSECTOR_KEY_CT, ct);
Ariel Levkovich5923b8f2020-07-23 01:03:01 +03001814 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1815 FLOW_DISSECTOR_KEY_HASH, hash);
Jiri Pirko77b99002015-05-12 14:56:21 +02001816
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001817 skb_flow_dissector_init(dissector, keys, cnt);
Paul Blakey05cd2712018-04-30 14:28:30 +03001818}
1819
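/* Allocate a new shared mask, set up its filter hash table and
 * dissector, and replace the temporary node inserted by
 * fl_check_assign_mask() with the real one in head->ht.
 */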
1820static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1821 struct fl_flow_mask *mask)
1822{
1823 struct fl_flow_mask *newmask;
1824 int err;
1825
1826 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1827 if (!newmask)
1828 return ERR_PTR(-ENOMEM);
1829
1830 fl_mask_copy(newmask, mask);
1831
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09001832 if ((newmask->key.tp_range.tp_min.dst &&
1833 newmask->key.tp_range.tp_max.dst) ||
1834 (newmask->key.tp_range.tp_min.src &&
1835 newmask->key.tp_range.tp_max.src))
Amritha Nambiar5c722992018-11-12 16:15:55 -08001836 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1837
Paul Blakey05cd2712018-04-30 14:28:30 +03001838 err = fl_init_mask_hashtable(newmask);
1839 if (err)
1840 goto errout_free;
1841
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001842 fl_init_dissector(&newmask->dissector, &newmask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +03001843
1844 INIT_LIST_HEAD_RCU(&newmask->filters);
1845
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001846 refcount_set(&newmask->refcnt, 1);
Vlad Buslov195c2342019-03-21 15:17:38 +02001847 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1848 &newmask->ht_node, mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001849 if (err)
1850 goto errout_destroy;
1851
Vlad Buslov259e60f2019-03-21 15:17:39 +02001852 spin_lock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001853 list_add_tail_rcu(&newmask->list, &head->masks);
Vlad Buslov259e60f2019-03-21 15:17:39 +02001854 spin_unlock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001855
1856 return newmask;
1857
1858errout_destroy:
1859 rhashtable_destroy(&newmask->ht);
1860errout_free:
1861 kfree(newmask);
1862
1863 return ERR_PTR(err);
Jiri Pirko77b99002015-05-12 14:56:21 +02001864}
1865
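/* Find or create the shared mask for a new filter.  The caller's mask
 * is first inserted into head->ht as a temporary node so that a
 * concurrent insert of an identical mask either reuses the finished
 * one or backs off with -EAGAIN while its refcount is still zero.
 */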
1866static int fl_check_assign_mask(struct cls_fl_head *head,
Paul Blakey05cd2712018-04-30 14:28:30 +03001867 struct cls_fl_filter *fnew,
1868 struct cls_fl_filter *fold,
Jiri Pirko77b99002015-05-12 14:56:21 +02001869 struct fl_flow_mask *mask)
1870{
Paul Blakey05cd2712018-04-30 14:28:30 +03001871 struct fl_flow_mask *newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001872 int ret = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001873
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001874 rcu_read_lock();
Vlad Buslov195c2342019-03-21 15:17:38 +02001875
1876 /* Insert mask as temporary node to prevent concurrent creation of mask
1877 * with same key. Any concurrent lookups with same key will return
Vlad Buslov99815f52019-06-13 17:54:04 +03001878 * -EAGAIN because mask's refcnt is zero.
Vlad Buslov195c2342019-03-21 15:17:38 +02001879 */
1880 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1881 &mask->ht_node,
1882 mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001883 if (!fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001884 rcu_read_unlock();
1885
Vlad Buslov195c2342019-03-21 15:17:38 +02001886 if (fold) {
1887 ret = -EINVAL;
1888 goto errout_cleanup;
1889 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001890
1891 newmask = fl_create_new_mask(head, mask);
Vlad Buslov195c2342019-03-21 15:17:38 +02001892 if (IS_ERR(newmask)) {
1893 ret = PTR_ERR(newmask);
1894 goto errout_cleanup;
1895 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001896
1897 fnew->mask = newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001898 return 0;
Vlad Buslov195c2342019-03-21 15:17:38 +02001899 } else if (IS_ERR(fnew->mask)) {
1900 ret = PTR_ERR(fnew->mask);
Paul Blakeyf6521c52018-06-03 10:06:14 +03001901 } else if (fold && fold->mask != fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001902 ret = -EINVAL;
1903 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1904 /* Mask was deleted concurrently, try again */
1905 ret = -EAGAIN;
Jiri Pirko77b99002015-05-12 14:56:21 +02001906 }
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001907 rcu_read_unlock();
1908 return ret;
Vlad Buslov195c2342019-03-21 15:17:38 +02001909
1910errout_cleanup:
1911 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1912 mask_ht_params);
Vlad Buslov195c2342019-03-21 15:17:38 +02001913 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001914}
1915
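/* Validate the actions and classid, parse the flow key and mask from
 * the netlink attributes, compute the masked key and check the
 * resulting mask against the chain template, if one is attached.
 */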
1916static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1917 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1918 unsigned long base, struct nlattr **tb,
Cong Wang695176b2021-07-29 16:12:14 -07001919 struct nlattr *est,
1920 struct fl_flow_tmplt *tmplt, u32 flags,
Alexander Aring50a56192018-01-18 11:20:52 -05001921 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001922{
Jiri Pirko77b99002015-05-12 14:56:21 +02001923 int err;
1924
Cong Wang695176b2021-07-29 16:12:14 -07001925 err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001926 if (err < 0)
1927 return err;
1928
1929 if (tb[TCA_FLOWER_CLASSID]) {
1930 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
Cong Wang695176b2021-07-29 16:12:14 -07001931 if (flags & TCA_ACT_FLAGS_NO_RTNL)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001932 rtnl_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001933 tcf_bind_filter(tp, &f->res, base);
Cong Wang695176b2021-07-29 16:12:14 -07001934 if (flags & TCA_ACT_FLAGS_NO_RTNL)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001935 rtnl_unlock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001936 }
1937
Alexander Aring1057c552018-01-18 11:20:54 -05001938 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001939 if (err)
Jiri Pirko45507522017-08-04 14:29:06 +02001940 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001941
1942 fl_mask_update_range(mask);
1943 fl_set_masked_key(&f->mkey, &f->key, mask);
1944
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001945 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1946 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1947 return -EINVAL;
1948 }
1949
Jiri Pirko77b99002015-05-12 14:56:21 +02001950 return 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001951}
1952
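/* Try to insert the new filter into its mask's hash table.  -EEXIST is
 * tolerated when an existing filter is being overwritten; *in_ht tells
 * the caller whether the node actually made it into the table.
 */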
Vlad Buslov1f17f772019-04-05 20:56:26 +03001953static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1954 struct cls_fl_filter *fold,
1955 bool *in_ht)
1956{
1957 struct fl_flow_mask *mask = fnew->mask;
1958 int err;
1959
Vlad Buslov9e355522019-04-11 19:12:20 +03001960 err = rhashtable_lookup_insert_fast(&mask->ht,
1961 &fnew->ht_node,
1962 mask->filter_ht_params);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001963 if (err) {
1964 *in_ht = false;
1965 /* It is okay if filter with same key exists when
1966 * overwriting.
1967 */
1968 return fold && err == -EEXIST ? 0 : err;
1969 }
1970
1971 *in_ht = true;
1972 return 0;
1973}
1974
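/* Create or update a filter.  The function may run without rtnl (see
 * TCA_ACT_FLAGS_NO_RTNL), so shared state is modified under tp->lock
 * and races with concurrent deletion of the old filter, its mask or the
 * tcf_proto itself are resolved by returning -EAGAIN to the caller.
 */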
Jiri Pirko77b99002015-05-12 14:56:21 +02001975static int fl_change(struct net *net, struct sk_buff *in_skb,
1976 struct tcf_proto *tp, unsigned long base,
1977 u32 handle, struct nlattr **tca,
Cong Wang695176b2021-07-29 16:12:14 -07001978 void **arg, u32 flags,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001979 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001980{
Vlad Buslove4746192019-03-21 15:17:33 +02001981 struct cls_fl_head *head = fl_head_dereference(tp);
Cong Wang695176b2021-07-29 16:12:14 -07001982 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
WANG Cong8113c092017-08-04 21:31:43 -07001983 struct cls_fl_filter *fold = *arg;
Jiri Pirko77b99002015-05-12 14:56:21 +02001984 struct cls_fl_filter *fnew;
Ivan Vecera2cddd202019-01-16 16:53:52 +01001985 struct fl_flow_mask *mask;
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001986 struct nlattr **tb;
Vlad Buslov1f17f772019-04-05 20:56:26 +03001987 bool in_ht;
Jiri Pirko77b99002015-05-12 14:56:21 +02001988 int err;
1989
Vlad Buslov06177552019-03-21 15:17:35 +02001990 if (!tca[TCA_OPTIONS]) {
1991 err = -EINVAL;
1992 goto errout_fold;
1993 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001994
Ivan Vecera2cddd202019-01-16 16:53:52 +01001995 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
Vlad Buslov06177552019-03-21 15:17:35 +02001996 if (!mask) {
1997 err = -ENOBUFS;
1998 goto errout_fold;
1999 }
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002000
Ivan Vecera2cddd202019-01-16 16:53:52 +01002001 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2002 if (!tb) {
2003 err = -ENOBUFS;
2004 goto errout_mask_alloc;
2005 }
2006
Johannes Berg8cb08172019-04-26 14:07:28 +02002007 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2008 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02002009 if (err < 0)
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002010 goto errout_tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02002011
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002012 if (fold && handle && fold->handle != handle) {
2013 err = -EINVAL;
2014 goto errout_tb;
2015 }
Jiri Pirko77b99002015-05-12 14:56:21 +02002016
2017 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002018 if (!fnew) {
2019 err = -ENOBUFS;
2020 goto errout_tb;
2021 }
Vlad Buslovc049d562019-04-24 09:53:31 +03002022 INIT_LIST_HEAD(&fnew->hw_list);
Vlad Buslov06177552019-03-21 15:17:35 +02002023 refcount_set(&fnew->refcnt, 1);
Jiri Pirko77b99002015-05-12 14:56:21 +02002024
Cong Wang14215102019-02-20 21:37:42 -08002025 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
WANG Congb9a24bb2016-08-19 12:36:54 -07002026 if (err < 0)
2027 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +02002028
Vlad Buslovecb3dea2019-03-06 16:22:12 +02002029 if (tb[TCA_FLOWER_FLAGS]) {
2030 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2031
2032 if (!tc_flags_valid(fnew->flags)) {
2033 err = -EINVAL;
2034 goto errout;
2035 }
2036 }
2037
Cong Wang695176b2021-07-29 16:12:14 -07002038 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2039 tp->chain->tmplt_priv, flags, extack);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02002040 if (err)
2041 goto errout;
2042
2043 err = fl_check_assign_mask(head, fnew, fold, mask);
2044 if (err)
2045 goto errout;
2046
Vlad Buslov1f17f772019-04-05 20:56:26 +03002047 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2048 if (err)
2049 goto errout_mask;
2050
Hadar Hen Zion79685212016-12-01 14:06:34 +02002051 if (!tc_skip_hw(fnew->flags)) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002052 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
Hadar Hen Zion79685212016-12-01 14:06:34 +02002053 if (err)
Vlad Buslov1f17f772019-04-05 20:56:26 +03002054 goto errout_ht;
Hadar Hen Zion79685212016-12-01 14:06:34 +02002055 }
Amir Vadai5b33f482016-03-08 12:42:29 +02002056
Or Gerlitz55593962017-02-16 10:31:13 +02002057 if (!tc_in_hw(fnew->flags))
2058 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2059
Vlad Buslov3d81e712019-03-21 15:17:42 +02002060 spin_lock(&tp->lock);
2061
Vlad Buslov272ffaa2019-03-21 15:17:41 +02002062 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2063 * proto again or create new one, if necessary.
2064 */
2065 if (tp->deleting) {
2066 err = -EAGAIN;
2067 goto errout_hw;
2068 }
2069
Amir Vadai5b33f482016-03-08 12:42:29 +02002070 if (fold) {
Vlad Buslovb2552b82019-03-21 15:17:36 +02002071 /* Fold filter was deleted concurrently. Retry lookup. */
2072 if (fold->deleted) {
2073 err = -EAGAIN;
2074 goto errout_hw;
2075 }
2076
Vlad Buslov620da482019-03-21 15:17:34 +02002077 fnew->handle = handle;
2078
Vlad Buslov1f17f772019-04-05 20:56:26 +03002079 if (!in_ht) {
2080 struct rhashtable_params params =
2081 fnew->mask->filter_ht_params;
2082
2083 err = rhashtable_insert_fast(&fnew->mask->ht,
2084 &fnew->ht_node,
2085 params);
2086 if (err)
2087 goto errout_hw;
2088 in_ht = true;
2089 }
Vlad Buslov620da482019-03-21 15:17:34 +02002090
Vlad Buslovc049d562019-04-24 09:53:31 +03002091 refcount_inc(&fnew->refcnt);
Roi Dayan599d2572018-12-19 18:07:56 +02002092 rhashtable_remove_fast(&fold->mask->ht,
2093 &fold->ht_node,
2094 fold->mask->filter_ht_params);
Matthew Wilcox234a4622017-11-28 09:56:36 -05002095 idr_replace(&head->handle_idr, fnew, fnew->handle);
Daniel Borkmannff3532f2015-07-17 22:38:44 +02002096 list_replace_rcu(&fold->list, &fnew->list);
Vlad Buslovb2552b82019-03-21 15:17:36 +02002097 fold->deleted = true;
Vlad Buslov620da482019-03-21 15:17:34 +02002098
Vlad Buslov3d81e712019-03-21 15:17:42 +02002099 spin_unlock(&tp->lock);
2100
Vlad Buslov99946772019-04-12 00:54:19 +03002101 fl_mask_put(head, fold->mask);
Vlad Buslov620da482019-03-21 15:17:34 +02002102 if (!tc_skip_hw(fold->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002103 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02002104 tcf_unbind_filter(tp, &fold->res);
Vlad Buslov06177552019-03-21 15:17:35 +02002105 /* Caller holds reference to fold, so refcnt is always > 0
2106 * after this.
2107 */
2108 refcount_dec(&fold->refcnt);
2109 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02002110 } else {
Vlad Buslov620da482019-03-21 15:17:34 +02002111 if (handle) {
2112 /* user specifies a handle and it doesn't exist */
2113 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2114 handle, GFP_ATOMIC);
Vlad Buslov9a2d9382019-03-21 15:17:40 +02002115
2116 /* Filter with specified handle was concurrently
2117 * inserted after initial check in cls_api. This is not
2118 * necessarily an error if NLM_F_EXCL is not set in
2119 * message flags. Returning EAGAIN will cause cls_api to
2120 * try to update concurrently inserted rule.
2121 */
2122 if (err == -ENOSPC)
2123 err = -EAGAIN;
Vlad Buslov620da482019-03-21 15:17:34 +02002124 } else {
2125 handle = 1;
2126 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2127 INT_MAX, GFP_ATOMIC);
2128 }
2129 if (err)
2130 goto errout_hw;
2131
Vlad Buslovc049d562019-04-24 09:53:31 +03002132 refcount_inc(&fnew->refcnt);
Vlad Buslov620da482019-03-21 15:17:34 +02002133 fnew->handle = handle;
Paul Blakey05cd2712018-04-30 14:28:30 +03002134 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
Vlad Buslov3d81e712019-03-21 15:17:42 +02002135 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02002136 }
2137
Vlad Buslov620da482019-03-21 15:17:34 +02002138 *arg = fnew;
2139
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002140 kfree(tb);
Vlad Buslov99815f52019-06-13 17:54:04 +03002141 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
Jiri Pirko77b99002015-05-12 14:56:21 +02002142 return 0;
2143
Vlad Buslovc049d562019-04-24 09:53:31 +03002144errout_ht:
2145 spin_lock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02002146errout_hw:
Vlad Buslovc049d562019-04-24 09:53:31 +03002147 fnew->deleted = true;
Vlad Buslov3d81e712019-03-21 15:17:42 +02002148 spin_unlock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02002149 if (!tc_skip_hw(fnew->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002150 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
Vlad Buslov1f17f772019-04-05 20:56:26 +03002151 if (in_ht)
2152 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2153 fnew->mask->filter_ht_params);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02002154errout_mask:
Vlad Buslov99946772019-04-12 00:54:19 +03002155 fl_mask_put(head, fnew->mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02002156errout:
Vlad Buslovc049d562019-04-24 09:53:31 +03002157 __fl_put(fnew);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002158errout_tb:
2159 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01002160errout_mask_alloc:
Vlad Buslov99815f52019-06-13 17:54:04 +03002161 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
Vlad Buslov06177552019-03-21 15:17:35 +02002162errout_fold:
2163 if (fold)
2164 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02002165 return err;
2166}
2167
Alexander Aring571acf22018-01-18 11:20:53 -05002168static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002169 bool rtnl_held, struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02002170{
Vlad Buslove4746192019-03-21 15:17:33 +02002171 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07002172 struct cls_fl_filter *f = arg;
Vlad Buslovb2552b82019-03-21 15:17:36 +02002173 bool last_on_mask;
2174 int err = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02002175
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002176 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
Paul Blakey05cd2712018-04-30 14:28:30 +03002177 *last = list_empty(&head->masks);
Vlad Buslov06177552019-03-21 15:17:35 +02002178 __fl_put(f);
2179
Vlad Buslovb2552b82019-03-21 15:17:36 +02002180 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02002181}
2182
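/* Walk all filters by handle.  Each filter is pinned with a refcount so
 * that the RCU read lock can be dropped around arg->fn(); filters whose
 * refcount already hit zero (i.e. being deleted) are skipped.
 */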
Vlad Buslov12db03b2019-02-11 10:55:45 +02002183static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2184 bool rtnl_held)
Jiri Pirko77b99002015-05-12 14:56:21 +02002185{
Cong Wangd39d7142019-06-28 11:03:42 -07002186 struct cls_fl_head *head = fl_head_dereference(tp);
2187 unsigned long id = arg->cookie, tmp;
Jiri Pirko77b99002015-05-12 14:56:21 +02002188 struct cls_fl_filter *f;
2189
Vlad Buslov01683a12018-07-09 13:29:11 +03002190 arg->count = arg->skip;
2191
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002192 rcu_read_lock();
Cong Wangd39d7142019-06-28 11:03:42 -07002193 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2194 /* don't return filters that are being deleted */
2195 if (!refcount_inc_not_zero(&f->refcnt))
2196 continue;
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002197 rcu_read_unlock();
2198
Vlad Buslov01683a12018-07-09 13:29:11 +03002199 if (arg->fn(tp, f, arg) < 0) {
Vlad Buslov06177552019-03-21 15:17:35 +02002200 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03002201 arg->stop = 1;
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002202 rcu_read_lock();
Vlad Buslov01683a12018-07-09 13:29:11 +03002203 break;
Paul Blakey05cd2712018-04-30 14:28:30 +03002204 }
Vlad Buslov06177552019-03-21 15:17:35 +02002205 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03002206 arg->count++;
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002207 rcu_read_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02002208 }
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002209 rcu_read_unlock();
Cong Wangd39d7142019-06-28 11:03:42 -07002210 arg->cookie = id;
Jiri Pirko77b99002015-05-12 14:56:21 +02002211}
2212
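/* Return the next offloaded filter after @f (or the first one when @f
 * is NULL) with a reference taken on it.  When adding, filters already
 * marked deleted are skipped.
 */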
Vlad Buslovc049d562019-04-24 09:53:31 +03002213static struct cls_fl_filter *
2214fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2215{
2216 struct cls_fl_head *head = fl_head_dereference(tp);
2217
2218 spin_lock(&tp->lock);
2219 if (list_empty(&head->hw_filters)) {
2220 spin_unlock(&tp->lock);
2221 return NULL;
2222 }
2223
2224 if (!f)
2225 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2226 hw_list);
2227 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2228 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2229 spin_unlock(&tp->lock);
2230 return f;
2231 }
2232 }
2233
2234 spin_unlock(&tp->lock);
2235 return NULL;
2236}
2237
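/* Replay all filters currently on the hw_filters list to a block
 * callback, rebuilding a flow_cls_offload request (dissector, mask, key
 * and actions) for each and passing it to tc_setup_cb_reoffload() as
 * either a replace or a destroy depending on @add.
 */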
Pablo Neira Ayusoa7323312019-07-19 18:20:15 +02002238static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
John Hurley31533cb2018-06-25 14:30:06 -07002239 void *cb_priv, struct netlink_ext_ack *extack)
2240{
John Hurley31533cb2018-06-25 14:30:06 -07002241 struct tcf_block *block = tp->chain->block;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002242 struct flow_cls_offload cls_flower = {};
Vlad Buslovc049d562019-04-24 09:53:31 +03002243 struct cls_fl_filter *f = NULL;
John Hurley31533cb2018-06-25 14:30:06 -07002244 int err;
2245
Vlad Buslovc049d562019-04-24 09:53:31 +03002246 /* hw_filters list can only be changed by hw offload functions after
2247 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2248 * iterating it.
2249 */
2250 ASSERT_RTNL();
John Hurley31533cb2018-06-25 14:30:06 -07002251
Vlad Buslovc049d562019-04-24 09:53:31 +03002252 while ((f = fl_get_next_hw_filter(tp, f, add))) {
John Hurley95e27a42019-04-02 23:53:20 +01002253 cls_flower.rule =
2254 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2255 if (!cls_flower.rule) {
2256 __fl_put(f);
2257 return -ENOMEM;
John Hurley31533cb2018-06-25 14:30:06 -07002258 }
John Hurley95e27a42019-04-02 23:53:20 +01002259
2260 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
Pieter Jansen van Vuurend6787142019-05-06 17:24:21 -07002261 extack);
John Hurley95e27a42019-04-02 23:53:20 +01002262 cls_flower.command = add ?
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002263 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
John Hurley95e27a42019-04-02 23:53:20 +01002264 cls_flower.cookie = (unsigned long)f;
2265 cls_flower.rule->match.dissector = &f->mask->dissector;
2266 cls_flower.rule->match.mask = &f->mask->key;
2267 cls_flower.rule->match.key = &f->mkey;
2268
Vlad Buslovb15e7a62020-02-17 12:12:12 +02002269 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
John Hurley95e27a42019-04-02 23:53:20 +01002270 if (err) {
2271 kfree(cls_flower.rule);
2272 if (tc_skip_sw(f->flags)) {
2273 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2274 __fl_put(f);
2275 return err;
2276 }
2277 goto next_flow;
2278 }
2279
2280 cls_flower.classid = f->res.classid;
2281
Vlad Buslov40119212019-08-26 16:44:59 +03002282 err = tc_setup_cb_reoffload(block, tp, add, cb,
2283 TC_SETUP_CLSFLOWER, &cls_flower,
2284 cb_priv, &f->flags,
2285 &f->in_hw_count);
Vlad Buslov5a6ff4b2019-08-26 16:45:04 +03002286 tc_cleanup_flow_action(&cls_flower.rule->action);
John Hurley95e27a42019-04-02 23:53:20 +01002287 kfree(cls_flower.rule);
2288
2289 if (err) {
Vlad Buslov40119212019-08-26 16:44:59 +03002290 __fl_put(f);
2291 return err;
John Hurley95e27a42019-04-02 23:53:20 +01002292 }
John Hurley95e27a42019-04-02 23:53:20 +01002293next_flow:
John Hurley95e27a42019-04-02 23:53:20 +01002294 __fl_put(f);
John Hurley31533cb2018-06-25 14:30:06 -07002295 }
2296
2297 return 0;
2298}
2299
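/* fl_hw_add()/fl_hw_del() track which filters are currently in
 * hardware by maintaining the hw_filters list under tp->lock; they are
 * invoked from the tc offload code when a filter enters or leaves
 * hardware.
 */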
Vlad Buslova449a3e2019-08-26 16:45:00 +03002300static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2301{
2302 struct flow_cls_offload *cls_flower = type_data;
2303 struct cls_fl_filter *f =
2304 (struct cls_fl_filter *) cls_flower->cookie;
2305 struct cls_fl_head *head = fl_head_dereference(tp);
2306
2307 spin_lock(&tp->lock);
2308 list_add(&f->hw_list, &head->hw_filters);
2309 spin_unlock(&tp->lock);
2310}
2311
2312static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2313{
2314 struct flow_cls_offload *cls_flower = type_data;
2315 struct cls_fl_filter *f =
2316 (struct cls_fl_filter *) cls_flower->cookie;
2317
2318 spin_lock(&tp->lock);
2319 if (!list_empty(&f->hw_list))
2320 list_del_init(&f->hw_list);
2321 spin_unlock(&tp->lock);
2322}
2323
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002324static int fl_hw_create_tmplt(struct tcf_chain *chain,
2325 struct fl_flow_tmplt *tmplt)
Jiri Pirko34738452018-07-23 09:23:11 +02002326{
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002327 struct flow_cls_offload cls_flower = {};
Jiri Pirko34738452018-07-23 09:23:11 +02002328 struct tcf_block *block = chain->block;
Jiri Pirko34738452018-07-23 09:23:11 +02002329
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01002330 cls_flower.rule = flow_rule_alloc(0);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002331 if (!cls_flower.rule)
2332 return -ENOMEM;
2333
Jiri Pirko34738452018-07-23 09:23:11 +02002334 cls_flower.common.chain_index = chain->index;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002335 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
Jiri Pirko34738452018-07-23 09:23:11 +02002336 cls_flower.cookie = (unsigned long) tmplt;
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002337 cls_flower.rule->match.dissector = &tmplt->dissector;
2338 cls_flower.rule->match.mask = &tmplt->mask;
2339 cls_flower.rule->match.key = &tmplt->dummy_key;
Jiri Pirko34738452018-07-23 09:23:11 +02002340
2341	/* We don't care if any of the drivers fails to handle this
2342	 * call; it serves only as a hint to them.
2343	 */
Vlad Buslov40119212019-08-26 16:44:59 +03002344 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002345 kfree(cls_flower.rule);
2346
2347 return 0;
Jiri Pirko34738452018-07-23 09:23:11 +02002348}
2349
2350static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2351 struct fl_flow_tmplt *tmplt)
2352{
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002353 struct flow_cls_offload cls_flower = {};
Jiri Pirko34738452018-07-23 09:23:11 +02002354 struct tcf_block *block = chain->block;
2355
2356 cls_flower.common.chain_index = chain->index;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002357 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
Jiri Pirko34738452018-07-23 09:23:11 +02002358 cls_flower.cookie = (unsigned long) tmplt;
2359
Vlad Buslov40119212019-08-26 16:44:59 +03002360 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
Jiri Pirko34738452018-07-23 09:23:11 +02002361}
2362
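/* Parse a chain template: fl_set_key() is reused to build a dummy key
 * and mask, which are then offered to the drivers purely as a hint of
 * what future filters on this chain will look like.
 */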
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002363static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2364 struct nlattr **tca,
2365 struct netlink_ext_ack *extack)
2366{
2367 struct fl_flow_tmplt *tmplt;
2368 struct nlattr **tb;
2369 int err;
2370
2371 if (!tca[TCA_OPTIONS])
2372 return ERR_PTR(-EINVAL);
2373
2374 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2375 if (!tb)
2376 return ERR_PTR(-ENOBUFS);
Johannes Berg8cb08172019-04-26 14:07:28 +02002377 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2378 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002379 if (err)
2380 goto errout_tb;
2381
2382 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03002383 if (!tmplt) {
2384 err = -ENOMEM;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002385 goto errout_tb;
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03002386 }
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002387 tmplt->chain = chain;
2388 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2389 if (err)
2390 goto errout_tmplt;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002391
2392 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2393
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002394 err = fl_hw_create_tmplt(chain, tmplt);
2395 if (err)
2396 goto errout_tmplt;
Jiri Pirko34738452018-07-23 09:23:11 +02002397
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002398 kfree(tb);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002399 return tmplt;
2400
2401errout_tmplt:
2402 kfree(tmplt);
2403errout_tb:
2404 kfree(tb);
2405 return ERR_PTR(err);
2406}
2407
2408static void fl_tmplt_destroy(void *tmplt_priv)
2409{
2410 struct fl_flow_tmplt *tmplt = tmplt_priv;
2411
Cong Wang95278dd2018-10-02 12:50:19 -07002412 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2413 kfree(tmplt);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002414}
2415
Jiri Pirko77b99002015-05-12 14:56:21 +02002416static int fl_dump_key_val(struct sk_buff *skb,
2417 void *val, int val_type,
2418 void *mask, int mask_type, int len)
2419{
2420 int err;
2421
2422 if (!memchr_inv(mask, 0, len))
2423 return 0;
2424 err = nla_put(skb, val_type, len, val);
2425 if (err)
2426 return err;
2427 if (mask_type != TCA_FLOWER_UNSPEC) {
2428 err = nla_put(skb, mask_type, len, mask);
2429 if (err)
2430 return err;
2431 }
2432 return 0;
2433}
2434
Amritha Nambiar5c722992018-11-12 16:15:55 -08002435static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2436 struct fl_flow_key *mask)
2437{
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09002438 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2439 TCA_FLOWER_KEY_PORT_DST_MIN,
2440 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2441 sizeof(key->tp_range.tp_min.dst)) ||
2442 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2443 TCA_FLOWER_KEY_PORT_DST_MAX,
2444 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2445 sizeof(key->tp_range.tp_max.dst)) ||
2446 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2447 TCA_FLOWER_KEY_PORT_SRC_MIN,
2448 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2449 sizeof(key->tp_range.tp_min.src)) ||
2450 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2451 TCA_FLOWER_KEY_PORT_SRC_MAX,
2452 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2453 sizeof(key->tp_range.tp_max.src)))
Amritha Nambiar5c722992018-11-12 16:15:55 -08002454 return -1;
2455
2456 return 0;
2457}
2458
Guillaume Nault61aec252020-05-26 14:29:04 +02002459static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2460 struct flow_dissector_key_mpls *mpls_key,
2461 struct flow_dissector_key_mpls *mpls_mask,
2462 u8 lse_index)
2463{
2464 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2465 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2466 int err;
2467
2468 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2469 lse_index + 1);
2470 if (err)
2471 return err;
2472
2473 if (lse_mask->mpls_ttl) {
2474 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2475 lse_key->mpls_ttl);
2476 if (err)
2477 return err;
2478 }
2479 if (lse_mask->mpls_bos) {
2480 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2481 lse_key->mpls_bos);
2482 if (err)
2483 return err;
2484 }
2485 if (lse_mask->mpls_tc) {
2486 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2487 lse_key->mpls_tc);
2488 if (err)
2489 return err;
2490 }
2491 if (lse_mask->mpls_label) {
Guillaume Nault7fdd3752020-12-09 16:48:41 +01002492 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2493 lse_key->mpls_label);
Guillaume Nault61aec252020-05-26 14:29:04 +02002494 if (err)
2495 return err;
2496 }
2497
2498 return 0;
2499}
2500
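/* Dump every masked MPLS label stack entry inside a
 * TCA_FLOWER_KEY_MPLS_OPTS nest, one TCA_FLOWER_KEY_MPLS_OPTS_LSE nest
 * per entry.
 */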
2501static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2502 struct flow_dissector_key_mpls *mpls_key,
2503 struct flow_dissector_key_mpls *mpls_mask)
2504{
2505 struct nlattr *opts;
2506 struct nlattr *lse;
2507 u8 lse_index;
2508 int err;
2509
2510 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2511 if (!opts)
2512 return -EMSGSIZE;
2513
2514 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2515 if (!(mpls_mask->used_lses & 1 << lse_index))
2516 continue;
2517
2518 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2519 if (!lse) {
2520 err = -EMSGSIZE;
2521 goto err_opts;
2522 }
2523
2524 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2525 lse_index);
2526 if (err)
2527 goto err_opts_lse;
2528 nla_nest_end(skb, lse);
2529 }
2530 nla_nest_end(skb, opts);
2531
2532 return 0;
2533
2534err_opts_lse:
2535 nla_nest_cancel(skb, lse);
2536err_opts:
2537 nla_nest_cancel(skb, opts);
2538
2539 return err;
2540}
2541
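/* Dump the MPLS match. Rules that can be expressed with the legacy flat
 * attributes keep using them; everything else is dumped through the
 * nested MPLS_OPTS representation.
 */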
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002542static int fl_dump_key_mpls(struct sk_buff *skb,
2543 struct flow_dissector_key_mpls *mpls_key,
2544 struct flow_dissector_key_mpls *mpls_mask)
2545{
Guillaume Nault58cff782020-05-26 14:29:00 +02002546 struct flow_dissector_mpls_lse *lse_mask;
2547 struct flow_dissector_mpls_lse *lse_key;
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002548 int err;
2549
Guillaume Nault61aec252020-05-26 14:29:04 +02002550 if (!mpls_mask->used_lses)
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002551 return 0;
Guillaume Nault58cff782020-05-26 14:29:00 +02002552
2553 lse_mask = &mpls_mask->ls[0];
2554 lse_key = &mpls_key->ls[0];
2555
Guillaume Nault61aec252020-05-26 14:29:04 +02002556 /* For backward compatibility, don't use the MPLS nested attributes if
2557 * the rule can be expressed using the old attributes.
2558 */
2559 if (mpls_mask->used_lses & ~1 ||
2560 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2561 !lse_mask->mpls_tc && !lse_mask->mpls_label))
2562 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2563
Guillaume Nault58cff782020-05-26 14:29:00 +02002564 if (lse_mask->mpls_ttl) {
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002565 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
Guillaume Nault58cff782020-05-26 14:29:00 +02002566 lse_key->mpls_ttl);
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002567 if (err)
2568 return err;
2569 }
Guillaume Nault58cff782020-05-26 14:29:00 +02002570 if (lse_mask->mpls_tc) {
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002571 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
Guillaume Nault58cff782020-05-26 14:29:00 +02002572 lse_key->mpls_tc);
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002573 if (err)
2574 return err;
2575 }
Guillaume Nault58cff782020-05-26 14:29:00 +02002576 if (lse_mask->mpls_label) {
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002577 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
Guillaume Nault58cff782020-05-26 14:29:00 +02002578 lse_key->mpls_label);
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002579 if (err)
2580 return err;
2581 }
Guillaume Nault58cff782020-05-26 14:29:00 +02002582 if (lse_mask->mpls_bos) {
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002583 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
Guillaume Nault58cff782020-05-26 14:29:00 +02002584 lse_key->mpls_bos);
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002585 if (err)
2586 return err;
2587 }
2588 return 0;
2589}
2590
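/* Dump IP TOS and TTL, selecting the tunnel (encap) or inner header
 * attribute types depending on the encap flag.
 */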
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002591static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002592 struct flow_dissector_key_ip *key,
2593 struct flow_dissector_key_ip *mask)
2594{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002595 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2596 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2597 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2598 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2599
2600 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2601 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002602 return -1;
2603
2604 return 0;
2605}
2606
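/* Dump VLAN id and priority under the given attribute types; used for
 * both the outer VLAN and the inner (customer) VLAN.
 */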
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002607static int fl_dump_key_vlan(struct sk_buff *skb,
Jianbo Liud64efd02018-07-06 05:38:16 +00002608 int vlan_id_key, int vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002609 struct flow_dissector_key_vlan *vlan_key,
2610 struct flow_dissector_key_vlan *vlan_mask)
2611{
2612 int err;
2613
2614 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2615 return 0;
2616 if (vlan_mask->vlan_id) {
Jianbo Liud64efd02018-07-06 05:38:16 +00002617 err = nla_put_u16(skb, vlan_id_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002618 vlan_key->vlan_id);
2619 if (err)
2620 return err;
2621 }
2622 if (vlan_mask->vlan_priority) {
Jianbo Liud64efd02018-07-06 05:38:16 +00002623 err = nla_put_u8(skb, vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002624 vlan_key->vlan_priority);
2625 if (err)
2626 return err;
2627 }
2628 return 0;
2629}
2630
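/* Translate one flow dissector flag bit into the corresponding flower
 * flag bit in the key and mask words.
 */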
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002631static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2632 u32 *flower_key, u32 *flower_mask,
2633 u32 flower_flag_bit, u32 dissector_flag_bit)
2634{
2635 if (dissector_mask & dissector_flag_bit) {
2636 *flower_mask |= flower_flag_bit;
2637 if (dissector_key & dissector_flag_bit)
2638 *flower_key |= flower_flag_bit;
2639 }
2640}
2641
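/* Dump the matched control flags (fragment, first fragment) as 32-bit
 * big-endian key and mask attributes.
 */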
2642static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2643{
2644 u32 key, mask;
2645 __be32 _key, _mask;
2646 int err;
2647
2648 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2649 return 0;
2650
2651 key = 0;
2652 mask = 0;
2653
2654 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2655 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Pieter Jansen van Vuuren459d1532018-03-06 18:11:14 +01002656 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2657 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2658 FLOW_DIS_FIRST_FRAG);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002659
2660 _key = cpu_to_be32(key);
2661 _mask = cpu_to_be32(mask);
2662
2663 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2664 if (err)
2665 return err;
2666
2667 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2668}
2669
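/* Dump all geneve tunnel options as class/type/data attributes inside a
 * TCA_FLOWER_KEY_ENC_OPTS_GENEVE nest.
 */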
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002670static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2671 struct flow_dissector_key_enc_opts *enc_opts)
2672{
2673 struct geneve_opt *opt;
2674 struct nlattr *nest;
2675 int opt_off = 0;
2676
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002677 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002678 if (!nest)
2679 goto nla_put_failure;
2680
2681 while (enc_opts->len > opt_off) {
2682 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2683
2684 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2685 opt->opt_class))
2686 goto nla_put_failure;
2687 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2688 opt->type))
2689 goto nla_put_failure;
2690 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2691 opt->length * 4, opt->opt_data))
2692 goto nla_put_failure;
2693
2694 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2695 }
2696 nla_nest_end(skb, nest);
2697 return 0;
2698
2699nla_put_failure:
2700 nla_nest_cancel(skb, nest);
2701 return -EMSGSIZE;
2702}
2703
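/* Dump the vxlan GBP tunnel option inside a
 * TCA_FLOWER_KEY_ENC_OPTS_VXLAN nest.
 */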
Xin Longd8f9dfa2019-11-21 18:03:28 +08002704static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2705 struct flow_dissector_key_enc_opts *enc_opts)
2706{
2707 struct vxlan_metadata *md;
2708 struct nlattr *nest;
2709
2710 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2711 if (!nest)
2712 goto nla_put_failure;
2713
2714 md = (struct vxlan_metadata *)&enc_opts->data[0];
2715 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2716 goto nla_put_failure;
2717
2718 nla_nest_end(skb, nest);
2719 return 0;
2720
2721nla_put_failure:
2722 nla_nest_cancel(skb, nest);
2723 return -EMSGSIZE;
2724}
2725
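/* Dump erspan tunnel options inside a TCA_FLOWER_KEY_ENC_OPTS_ERSPAN
 * nest: the version, plus the index for version 1 or direction and hwid
 * for version 2.
 */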
Xin Long79b10112019-11-21 18:03:29 +08002726static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2727 struct flow_dissector_key_enc_opts *enc_opts)
2728{
2729 struct erspan_metadata *md;
2730 struct nlattr *nest;
2731
2732 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2733 if (!nest)
2734 goto nla_put_failure;
2735
2736 md = (struct erspan_metadata *)&enc_opts->data[0];
2737 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2738 goto nla_put_failure;
2739
2740 if (md->version == 1 &&
2741 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2742 goto nla_put_failure;
2743
2744 if (md->version == 2 &&
2745 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2746 md->u.md2.dir) ||
2747 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2748 get_hwid(&md->u.md2))))
2749 goto nla_put_failure;
2750
2751 nla_nest_end(skb, nest);
2752 return 0;
2753
2754nla_put_failure:
2755 nla_nest_cancel(skb, nest);
2756 return -EMSGSIZE;
2757}
2758
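/* Dump conntrack state, zone, mark and labels. Each part is emitted
 * only when the corresponding conntrack feature is compiled in.
 */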
Paul Blakeye0ace682019-07-09 10:30:50 +03002759static int fl_dump_key_ct(struct sk_buff *skb,
2760 struct flow_dissector_key_ct *key,
2761 struct flow_dissector_key_ct *mask)
2762{
2763 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2764 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2765 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2766 sizeof(key->ct_state)))
2767 goto nla_put_failure;
2768
2769 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2770 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2771 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2772 sizeof(key->ct_zone)))
2773 goto nla_put_failure;
2774
2775 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2776 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2777 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2778 sizeof(key->ct_mark)))
2779 goto nla_put_failure;
2780
2781 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2782 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2783 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2784 sizeof(key->ct_labels)))
2785 goto nla_put_failure;
2786
2787 return 0;
2788
2789nla_put_failure:
2790 return -EMSGSIZE;
2791}
2792
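/* Dump tunnel options under the given attribute type (key or mask
 * nest), dispatching on the destination option type: geneve, vxlan or
 * erspan.
 */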
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002793static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2794 struct flow_dissector_key_enc_opts *enc_opts)
2795{
2796 struct nlattr *nest;
2797 int err;
2798
2799 if (!enc_opts->len)
2800 return 0;
2801
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002802 nest = nla_nest_start_noflag(skb, enc_opt_type);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002803 if (!nest)
2804 goto nla_put_failure;
2805
2806 switch (enc_opts->dst_opt_type) {
2807 case TUNNEL_GENEVE_OPT:
2808 err = fl_dump_key_geneve_opt(skb, enc_opts);
2809 if (err)
2810 goto nla_put_failure;
2811 break;
Xin Longd8f9dfa2019-11-21 18:03:28 +08002812 case TUNNEL_VXLAN_OPT:
2813 err = fl_dump_key_vxlan_opt(skb, enc_opts);
2814 if (err)
2815 goto nla_put_failure;
2816 break;
Xin Long79b10112019-11-21 18:03:29 +08002817 case TUNNEL_ERSPAN_OPT:
2818 err = fl_dump_key_erspan_opt(skb, enc_opts);
2819 if (err)
2820 goto nla_put_failure;
2821 break;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002822 default:
2823 goto nla_put_failure;
2824 }
2825 nla_nest_end(skb, nest);
2826 return 0;
2827
2828nla_put_failure:
2829 nla_nest_cancel(skb, nest);
2830 return -EMSGSIZE;
2831}
2832
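/* Dump tunnel options for both the key and its mask. */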
2833static int fl_dump_key_enc_opt(struct sk_buff *skb,
2834 struct flow_dissector_key_enc_opts *key_opts,
2835 struct flow_dissector_key_enc_opts *msk_opts)
2836{
2837 int err;
2838
2839 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2840 if (err)
2841 return err;
2842
2843 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2844}
2845
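/* Dump every masked portion of a flower key: indev, L2-L4 headers,
 * tunnel metadata and options, conntrack state, control flags and hash.
 * Shared by the filter and template dump paths.
 */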
Jiri Pirkof5749082018-07-23 09:23:08 +02002846static int fl_dump_key(struct sk_buff *skb, struct net *net,
2847 struct fl_flow_key *key, struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02002848{
Jiri Pirko8212ed72019-06-19 09:41:03 +03002849 if (mask->meta.ingress_ifindex) {
Jiri Pirko77b99002015-05-12 14:56:21 +02002850 struct net_device *dev;
2851
Jiri Pirko8212ed72019-06-19 09:41:03 +03002852 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
Jiri Pirko77b99002015-05-12 14:56:21 +02002853 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2854 goto nla_put_failure;
2855 }
2856
2857 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2858 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2859 sizeof(key->eth.dst)) ||
2860 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2861 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2862 sizeof(key->eth.src)) ||
2863 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2864 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2865 sizeof(key->basic.n_proto)))
2866 goto nla_put_failure;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002867
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002868 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2869 goto nla_put_failure;
2870
Jianbo Liud64efd02018-07-06 05:38:16 +00002871 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2872 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002873 goto nla_put_failure;
2874
Jianbo Liud64efd02018-07-06 05:38:16 +00002875 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2876 TCA_FLOWER_KEY_CVLAN_PRIO,
2877 &key->cvlan, &mask->cvlan) ||
2878 (mask->cvlan.vlan_tpid &&
Jianbo Liu158abbf2018-07-25 02:31:25 +00002879 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2880 key->cvlan.vlan_tpid)))
Jianbo Liud3069512018-07-06 05:38:15 +00002881 goto nla_put_failure;
2882
Jianbo Liu5e9a0fe2018-07-09 02:26:20 +00002883 if (mask->basic.n_proto) {
2884 if (mask->cvlan.vlan_tpid) {
2885 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2886 key->basic.n_proto))
2887 goto nla_put_failure;
2888 } else if (mask->vlan.vlan_tpid) {
2889 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2890 key->basic.n_proto))
2891 goto nla_put_failure;
2892 }
Jianbo Liud64efd02018-07-06 05:38:16 +00002893 }
2894
Jiri Pirko77b99002015-05-12 14:56:21 +02002895 if ((key->basic.n_proto == htons(ETH_P_IP) ||
2896 key->basic.n_proto == htons(ETH_P_IPV6)) &&
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002897 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
Jiri Pirko77b99002015-05-12 14:56:21 +02002898 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002899 sizeof(key->basic.ip_proto)) ||
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002900 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
Jiri Pirko77b99002015-05-12 14:56:21 +02002901 goto nla_put_failure;
2902
Tom Herbertc3f83242015-06-04 09:16:40 -07002903 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02002904 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2905 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2906 sizeof(key->ipv4.src)) ||
2907 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2908 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2909 sizeof(key->ipv4.dst))))
2910 goto nla_put_failure;
Tom Herbertc3f83242015-06-04 09:16:40 -07002911 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02002912 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2913 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2914 sizeof(key->ipv6.src)) ||
2915 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2916 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2917 sizeof(key->ipv6.dst))))
2918 goto nla_put_failure;
2919
2920 if (key->basic.ip_proto == IPPROTO_TCP &&
2921 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002922 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002923 sizeof(key->tp.src)) ||
2924 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002925 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02002926 sizeof(key->tp.dst)) ||
2927 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2928 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2929 sizeof(key->tcp.flags))))
Jiri Pirko77b99002015-05-12 14:56:21 +02002930 goto nla_put_failure;
2931 else if (key->basic.ip_proto == IPPROTO_UDP &&
2932 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002933 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002934 sizeof(key->tp.src)) ||
2935 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002936 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002937 sizeof(key->tp.dst))))
2938 goto nla_put_failure;
Simon Horman5976c5f2016-11-03 13:24:21 +01002939 else if (key->basic.ip_proto == IPPROTO_SCTP &&
2940 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2941 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2942 sizeof(key->tp.src)) ||
2943 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2944 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2945 sizeof(key->tp.dst))))
2946 goto nla_put_failure;
Simon Horman7b684882016-12-07 13:48:28 +01002947 else if (key->basic.n_proto == htons(ETH_P_IP) &&
2948 key->basic.ip_proto == IPPROTO_ICMP &&
2949 (fl_dump_key_val(skb, &key->icmp.type,
2950 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2951 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2952 sizeof(key->icmp.type)) ||
2953 fl_dump_key_val(skb, &key->icmp.code,
2954 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2955 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2956 sizeof(key->icmp.code))))
2957 goto nla_put_failure;
2958 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2959 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2960 (fl_dump_key_val(skb, &key->icmp.type,
2961 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2962 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2963 sizeof(key->icmp.type)) ||
2964 fl_dump_key_val(skb, &key->icmp.code,
2965 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2966 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2967 sizeof(key->icmp.code))))
2968 goto nla_put_failure;
Simon Horman99d31322017-01-11 14:05:43 +01002969 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2970 key->basic.n_proto == htons(ETH_P_RARP)) &&
2971 (fl_dump_key_val(skb, &key->arp.sip,
2972 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2973 TCA_FLOWER_KEY_ARP_SIP_MASK,
2974 sizeof(key->arp.sip)) ||
2975 fl_dump_key_val(skb, &key->arp.tip,
2976 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2977 TCA_FLOWER_KEY_ARP_TIP_MASK,
2978 sizeof(key->arp.tip)) ||
2979 fl_dump_key_val(skb, &key->arp.op,
2980 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2981 TCA_FLOWER_KEY_ARP_OP_MASK,
2982 sizeof(key->arp.op)) ||
2983 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2984 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2985 sizeof(key->arp.sha)) ||
2986 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2987 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2988 sizeof(key->arp.tha))))
2989 goto nla_put_failure;
Jiri Pirko77b99002015-05-12 14:56:21 +02002990
Amritha Nambiar5c722992018-11-12 16:15:55 -08002991 if ((key->basic.ip_proto == IPPROTO_TCP ||
2992 key->basic.ip_proto == IPPROTO_UDP ||
2993 key->basic.ip_proto == IPPROTO_SCTP) &&
2994 fl_dump_key_port_range(skb, key, mask))
2995 goto nla_put_failure;
2996
Amir Vadaibc3103f2016-09-08 16:23:47 +03002997 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2998 (fl_dump_key_val(skb, &key->enc_ipv4.src,
2999 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3000 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3001 sizeof(key->enc_ipv4.src)) ||
3002 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3003 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3004 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3005 sizeof(key->enc_ipv4.dst))))
3006 goto nla_put_failure;
3007 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3008 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3009 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3010 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3011 sizeof(key->enc_ipv6.src)) ||
3012 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3013 TCA_FLOWER_KEY_ENC_IPV6_DST,
3014 &mask->enc_ipv6.dst,
3015 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3016 sizeof(key->enc_ipv6.dst))))
3017 goto nla_put_failure;
3018
3019 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03003020 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02003021 sizeof(key->enc_key_id)) ||
3022 fl_dump_key_val(skb, &key->enc_tp.src,
3023 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3024 &mask->enc_tp.src,
3025 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3026 sizeof(key->enc_tp.src)) ||
3027 fl_dump_key_val(skb, &key->enc_tp.dst,
3028 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3029 &mask->enc_tp.dst,
3030 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03003031 sizeof(key->enc_tp.dst)) ||
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02003032 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3033 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
Amir Vadaibc3103f2016-09-08 16:23:47 +03003034 goto nla_put_failure;
3035
Paul Blakeye0ace682019-07-09 10:30:50 +03003036 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3037 goto nla_put_failure;
3038
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02003039 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3040 goto nla_put_failure;
3041
Ariel Levkovich5923b8f2020-07-23 01:03:01 +03003042 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3043 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3044 sizeof(key->hash.hash)))
3045 goto nla_put_failure;
3046
Jiri Pirkof5749082018-07-23 09:23:08 +02003047 return 0;
3048
3049nla_put_failure:
3050 return -EMSGSIZE;
3051}
3052
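/* Full dump of one filter. The classid, key/mask and flags are read
 * under tp->lock; hardware counters are refreshed (unless the filter is
 * skip_hw) before the actions and their stats are dumped.
 */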
3053static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02003054 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
Jiri Pirkof5749082018-07-23 09:23:08 +02003055{
3056 struct cls_fl_filter *f = fh;
3057 struct nlattr *nest;
3058 struct fl_flow_key *key, *mask;
Vlad Buslov3d81e712019-03-21 15:17:42 +02003059 bool skip_hw;
Jiri Pirkof5749082018-07-23 09:23:08 +02003060
3061 if (!f)
3062 return skb->len;
3063
3064 t->tcm_handle = f->handle;
3065
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003066 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Jiri Pirkof5749082018-07-23 09:23:08 +02003067 if (!nest)
3068 goto nla_put_failure;
3069
Vlad Buslov3d81e712019-03-21 15:17:42 +02003070 spin_lock(&tp->lock);
3071
Jiri Pirkof5749082018-07-23 09:23:08 +02003072 if (f->res.classid &&
3073 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
Vlad Buslov3d81e712019-03-21 15:17:42 +02003074 goto nla_put_failure_locked;
Jiri Pirkof5749082018-07-23 09:23:08 +02003075
3076 key = &f->key;
3077 mask = &f->mask->key;
Vlad Buslov3d81e712019-03-21 15:17:42 +02003078 skip_hw = tc_skip_hw(f->flags);
Jiri Pirkof5749082018-07-23 09:23:08 +02003079
3080 if (fl_dump_key(skb, net, key, mask))
Vlad Buslov3d81e712019-03-21 15:17:42 +02003081 goto nla_put_failure_locked;
Jiri Pirkof5749082018-07-23 09:23:08 +02003082
Or Gerlitz749e6722017-02-16 10:31:10 +02003083 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
Vlad Buslov3d81e712019-03-21 15:17:42 +02003084 goto nla_put_failure_locked;
3085
3086 spin_unlock(&tp->lock);
3087
3088 if (!skip_hw)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02003089 fl_hw_update_stats(tp, f, rtnl_held);
Amir Vadaie69985c2016-06-05 17:11:18 +03003090
Vlad Buslov86c55362018-09-07 17:22:21 +03003091 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3092 goto nla_put_failure;
3093
Jiri Pirko77b99002015-05-12 14:56:21 +02003094 if (tcf_exts_dump(skb, &f->exts))
3095 goto nla_put_failure;
3096
3097 nla_nest_end(skb, nest);
3098
3099 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3100 goto nla_put_failure;
3101
3102 return skb->len;
3103
Vlad Buslov3d81e712019-03-21 15:17:42 +02003104nla_put_failure_locked:
3105 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02003106nla_put_failure:
3107 nla_nest_cancel(skb, nest);
3108 return -1;
3109}
3110
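/* Reduced dump used when userspace asks for a terse listing: only the
 * handle, flags and a terse view of the actions are emitted, without
 * the key.
 */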
Vlad Buslov03484512020-05-15 14:40:13 +03003111static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3112 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3113{
3114 struct cls_fl_filter *f = fh;
3115 struct nlattr *nest;
3116 bool skip_hw;
3117
3118 if (!f)
3119 return skb->len;
3120
3121 t->tcm_handle = f->handle;
3122
3123 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3124 if (!nest)
3125 goto nla_put_failure;
3126
3127 spin_lock(&tp->lock);
3128
3129 skip_hw = tc_skip_hw(f->flags);
3130
3131 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3132 goto nla_put_failure_locked;
3133
3134 spin_unlock(&tp->lock);
3135
3136 if (!skip_hw)
3137 fl_hw_update_stats(tp, f, rtnl_held);
3138
3139 if (tcf_exts_terse_dump(skb, &f->exts))
3140 goto nla_put_failure;
3141
3142 nla_nest_end(skb, nest);
3143
3144 return skb->len;
3145
3146nla_put_failure_locked:
3147 spin_unlock(&tp->lock);
3148nla_put_failure:
3149 nla_nest_cancel(skb, nest);
3150 return -1;
3151}
3152
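/* Dump a chain template as a TCA_OPTIONS nest holding its key/mask. */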
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02003153static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3154{
3155 struct fl_flow_tmplt *tmplt = tmplt_priv;
3156 struct fl_flow_key *key, *mask;
3157 struct nlattr *nest;
3158
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003159 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02003160 if (!nest)
3161 goto nla_put_failure;
3162
3163 key = &tmplt->dummy_key;
3164 mask = &tmplt->mask;
3165
3166 if (fl_dump_key(skb, net, key, mask))
3167 goto nla_put_failure;
3168
3169 nla_nest_end(skb, nest);
3170
3171 return skb->len;
3172
3173nla_put_failure:
3174 nla_nest_cancel(skb, nest);
3175 return -EMSGSIZE;
3176}
3177
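/* Update the class binding of filters that point at the given classid
 * when that class is re-bound or removed.
 */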
Cong Wang2e24cd72020-01-23 16:26:18 -08003178static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3179 unsigned long base)
Cong Wang07d79fc2017-08-30 14:30:36 -07003180{
3181 struct cls_fl_filter *f = fh;
3182
Cong Wang2e24cd72020-01-23 16:26:18 -08003183 if (f && f->res.classid == classid) {
3184 if (cl)
3185 __tcf_bind_filter(q, &f->res, base);
3186 else
3187 __tcf_unbind_filter(q, &f->res);
3188 }
Cong Wang07d79fc2017-08-30 14:30:36 -07003189}
3190
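/* Report whether the classifier has no filters left. tp->deleting is
 * set under tp->lock so that a concurrent insert cannot race with the
 * proto being destroyed.
 */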
Davide Carattia5b72a02019-12-28 16:36:58 +01003191static bool fl_delete_empty(struct tcf_proto *tp)
3192{
3193 struct cls_fl_head *head = fl_head_dereference(tp);
3194
3195 spin_lock(&tp->lock);
3196 tp->deleting = idr_is_empty(&head->handle_idr);
3197 spin_unlock(&tp->lock);
3198
3199 return tp->deleting;
3200}
3201
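/* Flower classifier ops. TCF_PROTO_OPS_DOIT_UNLOCKED lets filter
 * add/change/delete run without taking the rtnl lock.
 */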
Jiri Pirko77b99002015-05-12 14:56:21 +02003202static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3203 .kind = "flower",
3204 .classify = fl_classify,
3205 .init = fl_init,
3206 .destroy = fl_destroy,
3207 .get = fl_get,
Vlad Buslov06177552019-03-21 15:17:35 +02003208 .put = fl_put,
Jiri Pirko77b99002015-05-12 14:56:21 +02003209 .change = fl_change,
3210 .delete = fl_delete,
Davide Carattia5b72a02019-12-28 16:36:58 +01003211 .delete_empty = fl_delete_empty,
Jiri Pirko77b99002015-05-12 14:56:21 +02003212 .walk = fl_walk,
John Hurley31533cb2018-06-25 14:30:06 -07003213 .reoffload = fl_reoffload,
Vlad Buslova449a3e2019-08-26 16:45:00 +03003214 .hw_add = fl_hw_add,
3215 .hw_del = fl_hw_del,
Jiri Pirko77b99002015-05-12 14:56:21 +02003216 .dump = fl_dump,
Vlad Buslov03484512020-05-15 14:40:13 +03003217 .terse_dump = fl_terse_dump,
Cong Wang07d79fc2017-08-30 14:30:36 -07003218 .bind_class = fl_bind_class,
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02003219 .tmplt_create = fl_tmplt_create,
3220 .tmplt_destroy = fl_tmplt_destroy,
3221 .tmplt_dump = fl_tmplt_dump,
Jiri Pirko77b99002015-05-12 14:56:21 +02003222 .owner = THIS_MODULE,
Vlad Buslov92149192019-03-21 15:17:44 +02003223 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
Jiri Pirko77b99002015-05-12 14:56:21 +02003224};
3225
3226static int __init cls_fl_init(void)
3227{
3228 return register_tcf_proto_ops(&cls_fl_ops);
3229}
3230
3231static void __exit cls_fl_exit(void)
3232{
3233 unregister_tcf_proto_ops(&cls_fl_ops);
3234}
3235
3236module_init(cls_fl_init);
3237module_exit(cls_fl_exit);
3238
3239MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3240MODULE_DESCRIPTION("Flower classifier");
3241MODULE_LICENSE("GPL v2");