// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

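/* Illustrative usage sketch (not part of the original sources, assuming the
 * iproute2 flower front end): a flower filter is typically installed from
 * user space with commands such as
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 skip_sw action drop
 *
 * The TCA_FLOWER_* netlink attributes parsed below are the kernel-side
 * encoding of such match keys; skip_sw/skip_hw select hardware or software
 * classification.
 */
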
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

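/* Worked example (illustrative sketch, assuming a 64-bit build): if the only
 * non-zero mask bytes sit at offsets 13 and 22 of the key, the loops above
 * find first = 13 and last = 22, so
 *	range.start = rounddown(13, 8) = 8
 *	range.end   = roundup(23, 8)  = 24
 * and subsequent lookups hash and compare only bytes [8, 24) of the key,
 * i.e. two longs instead of the whole structure.
 */
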
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

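/* Illustrative sketch (not from the original sources): with a /24 IPv4
 * destination mask, a dissected key byte sequence such as
 *	key  = c0 a8 01 2a	(192.168.1.42)
 *	mask = ff ff ff 00
 * is ANDed long-by-long above into
 *	mkey = c0 a8 01 00
 * which is what gets hashed and looked up in the per-mask rhashtable.
 */
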
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

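/* Illustrative mapping (sketch, assuming the iproute2 ct_state syntax): a
 * conntrack state of IP_CT_ESTABLISHED_REPLY is exposed to user space as
 * "+trk+est+rpl", so a rule such as
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ct_state +trk+est action pass
 * matches packets that the table above tags with TRACKED and ESTABLISHED.
 */
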
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);

}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
				 cls_flower.stats.pkts,
				 cls_flower.stats.drops,
				 cls_flower.stats.lastused,
				 cls_flower.stats.used_hw_stats,
				 cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },

};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]	= { .type = NLA_U32 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}

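/* Example (illustrative, assuming the iproute2 front end): a destination
 * port range is requested as
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 1000-2000 action drop
 * which is encoded as TCA_FLOWER_KEY_PORT_DST_MIN/MAX; the checks above
 * reject ranges where min is not strictly smaller than max.
 */
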
static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

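/* Example (illustrative sketch, assuming the iproute2 MPLS syntax): matching
 * the second label stack entry can be requested as
 *	tc filter add dev eth0 ingress protocol mpls_uc flower \
 *		mpls lse depth 2 label 200 action drop
 * which arrives as nested TCA_FLOWER_KEY_MPLS_OPTS_LSE attributes and is
 * parsed by fl_set_key_mpls_lse() above (depth 2 selects ls[1]).
 */
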
static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

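/* Example (illustrative): a QinQ match such as
 *	tc filter add dev eth0 ingress protocol 802.1ad flower \
 *		vlan_id 10 vlan_ethtype 802.1q cvlan_id 20 action pass
 * reaches this helper twice, once for the outer vlan key and once for cvlan,
 * with the TPID taken from the ethertype preceding each tag.
 */
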
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

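/* Example (illustrative, assuming the iproute2 geneve_opts syntax): a tunnel
 * metadata match such as
 *	tc filter add dev geneve0 ingress flower \
 *		enc_key_id 100 enc_dst_port 6081 \
 *		geneve_opts 0102:80:00880022 action drop
 * is parsed above as class 0x0102, type 0x80 and a 4-byte data payload; an
 * omitted mask is treated as an exact match.
 */
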
Xin Longd8f9dfa2019-11-21 18:03:28 +08001163static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1164 int depth, int option_len,
1165 struct netlink_ext_ack *extack)
1166{
1167 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1168 struct vxlan_metadata *md;
1169 int err;
1170
1171 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1172 memset(md, 0xff, sizeof(*md));
1173
1174 if (!depth)
1175 return sizeof(*md);
1176
1177 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1178 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1179 return -EINVAL;
1180 }
1181
1182 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1183 vxlan_opt_policy, extack);
1184 if (err < 0)
1185 return err;
1186
1187 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1188 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1189 return -EINVAL;
1190 }
1191
Xin Long13e6ce92020-09-13 19:51:50 +08001192 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
Xin Longd8f9dfa2019-11-21 18:03:28 +08001193 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
Xin Long13e6ce92020-09-13 19:51:50 +08001194 md->gbp &= VXLAN_GBP_MASK;
1195 }
Xin Longd8f9dfa2019-11-21 18:03:28 +08001196
1197 return sizeof(*md);
1198}
1199
Xin Long79b10112019-11-21 18:03:29 +08001200static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1201 int depth, int option_len,
1202 struct netlink_ext_ack *extack)
1203{
1204 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1205 struct erspan_metadata *md;
1206 int err;
1207
1208 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1209 memset(md, 0xff, sizeof(*md));
1210 md->version = 1;
1211
1212 if (!depth)
1213 return sizeof(*md);
1214
1215 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1216 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1217 return -EINVAL;
1218 }
1219
1220 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1221 erspan_opt_policy, extack);
1222 if (err < 0)
1223 return err;
1224
1225 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1226 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1227 return -EINVAL;
1228 }
1229
1230 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1231 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1232
1233 if (md->version == 1) {
1234 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1235 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1236 return -EINVAL;
1237 }
1238 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1239 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
Xin Long8e1b3ac2020-09-13 19:43:03 +08001240 memset(&md->u, 0x00, sizeof(md->u));
Xin Long79b10112019-11-21 18:03:29 +08001241 md->u.index = nla_get_be32(nla);
1242 }
1243 } else if (md->version == 2) {
1244 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1245 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1246 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1247 return -EINVAL;
1248 }
1249 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1250 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1251 md->u.md2.dir = nla_get_u8(nla);
1252 }
1253 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1254 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1255 set_hwid(&md->u.md2, nla_get_u8(nla));
1256 }
1257 } else {
1258 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1259 return -EINVAL;
1260 }
1261
1262 return sizeof(*md);
1263}
1264
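/* Walk the TCA_FLOWER_KEY_ENC_OPTS nest (and the optional _MASK nest in
 * lockstep) and hand each option to the geneve/vxlan/erspan parser above.
 * Only one tunnel option type may be used per filter, and the key and mask
 * option lengths must come out identical.
 *
 * Illustrative userspace counterpart (iproute2 flower syntax, given here
 * purely as an example of the attributes this function consumes):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       enc_key_id 102 enc_dst_port 6081 \
 *       geneve_opts 0102:80:00112233/ffff:ff:ffffffff \
 *       action drop
 */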
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001265static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1266 struct fl_flow_key *mask,
1267 struct netlink_ext_ack *extack)
1268{
1269 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
Jakub Kicinski63c82992018-11-09 21:06:26 -08001270 int err, option_len, key_depth, msk_depth = 0;
1271
Johannes Berg8cb08172019-04-26 14:07:28 +02001272 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1273 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1274 enc_opts_policy, extack);
Jakub Kicinski63c82992018-11-09 21:06:26 -08001275 if (err)
1276 return err;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001277
1278 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1279
1280 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
Johannes Berg8cb08172019-04-26 14:07:28 +02001281 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1282 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1283 enc_opts_policy, extack);
Jakub Kicinski63c82992018-11-09 21:06:26 -08001284 if (err)
1285 return err;
1286
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001287 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1288 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
Cong Wangc96adff2021-01-15 10:50:24 -08001289 if (!nla_ok(nla_opt_msk, msk_depth)) {
1290 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1291 return -EINVAL;
1292 }
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001293 }
1294
1295 nla_for_each_attr(nla_opt_key, nla_enc_key,
1296 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1297 switch (nla_type(nla_opt_key)) {
1298 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
Xin Longd8f9dfa2019-11-21 18:03:28 +08001299 if (key->enc_opts.dst_opt_type &&
1300 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1301 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1302 return -EINVAL;
1303 }
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001304 option_len = 0;
1305 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1306 option_len = fl_set_geneve_opt(nla_opt_key, key,
1307 key_depth, option_len,
1308 extack);
1309 if (option_len < 0)
1310 return option_len;
1311
1312 key->enc_opts.len += option_len;
1313 /* At the same time we need to parse through the mask
1314 * in order to verify exact and mask attribute lengths.
1315 */
1316 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1317 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1318 msk_depth, option_len,
1319 extack);
1320 if (option_len < 0)
1321 return option_len;
1322
1323 mask->enc_opts.len += option_len;
1324 if (key->enc_opts.len != mask->enc_opts.len) {
1325				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1326 return -EINVAL;
1327 }
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001328 break;
Xin Longd8f9dfa2019-11-21 18:03:28 +08001329 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1330 if (key->enc_opts.dst_opt_type) {
1331 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1332 return -EINVAL;
1333 }
1334 option_len = 0;
1335 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1336 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1337 key_depth, option_len,
1338 extack);
1339 if (option_len < 0)
1340 return option_len;
1341
1342 key->enc_opts.len += option_len;
1343 /* At the same time we need to parse through the mask
1344 * in order to verify exact and mask attribute lengths.
1345 */
1346 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1347 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1348 msk_depth, option_len,
1349 extack);
1350 if (option_len < 0)
1351 return option_len;
1352
1353 mask->enc_opts.len += option_len;
1354 if (key->enc_opts.len != mask->enc_opts.len) {
1355				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1356 return -EINVAL;
1357 }
Xin Longd8f9dfa2019-11-21 18:03:28 +08001358 break;
Xin Long79b10112019-11-21 18:03:29 +08001359 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1360 if (key->enc_opts.dst_opt_type) {
1361 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1362 return -EINVAL;
1363 }
1364 option_len = 0;
1365 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1366 option_len = fl_set_erspan_opt(nla_opt_key, key,
1367 key_depth, option_len,
1368 extack);
1369 if (option_len < 0)
1370 return option_len;
1371
1372 key->enc_opts.len += option_len;
1373 /* At the same time we need to parse through the mask
1374 * in order to verify exact and mask attribute lengths.
1375 */
1376 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1377 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1378 msk_depth, option_len,
1379 extack);
1380 if (option_len < 0)
1381 return option_len;
1382
1383 mask->enc_opts.len += option_len;
1384 if (key->enc_opts.len != mask->enc_opts.len) {
1385				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1386 return -EINVAL;
1387 }
Xin Long79b10112019-11-21 18:03:29 +08001388 break;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001389 default:
1390 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1391 return -EINVAL;
1392 }
Cong Wangc96adff2021-01-15 10:50:24 -08001393
1394 if (!msk_depth)
1395 continue;
1396
1397 if (!nla_ok(nla_opt_msk, msk_depth)) {
1398 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1399 return -EINVAL;
1400 }
1401 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001402 }
1403
1404 return 0;
1405}
1406
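/* Reject conntrack state combinations that could never match: any flag
 * without +trk, +new together with +est or +rpl, and +inv combined with
 * anything other than +trk.  Called with the key bits that are covered by
 * the mask.
 */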
wenxu1bcc51a2021-02-09 14:37:49 +08001407static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1408 struct netlink_ext_ack *extack)
1409{
1410 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1411 NL_SET_ERR_MSG_ATTR(extack, tb,
1412 "no trk, so no other flag can be set");
1413 return -EINVAL;
1414 }
1415
1416 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1417 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1418 NL_SET_ERR_MSG_ATTR(extack, tb,
1419 "new and est are mutually exclusive");
1420 return -EINVAL;
1421 }
1422
wenxu3aed8b62021-02-23 15:11:55 +08001423 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1424 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1425 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1426 NL_SET_ERR_MSG_ATTR(extack, tb,
1427 "when inv is set, only trk may be set");
1428 return -EINVAL;
1429 }
1430
1431 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1432 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1433 NL_SET_ERR_MSG_ATTR(extack, tb,
1434 "new and rpl are mutually exclusive");
1435 return -EINVAL;
1436 }
1437
wenxu1bcc51a2021-02-09 14:37:49 +08001438 return 0;
1439}
1440
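/* Copy the conntrack match fields (state, zone, mark, labels) from the
 * netlink attributes into the flow key and mask, rejecting any field whose
 * kernel support (NF_CONNTRACK, _ZONES, _MARK, _LABELS) is not compiled in.
 * In iproute2 flower syntax this backs matches such as "ct_state +trk+est"
 * (shown only as an example).
 */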
Paul Blakeye0ace682019-07-09 10:30:50 +03001441static int fl_set_key_ct(struct nlattr **tb,
1442 struct flow_dissector_key_ct *key,
1443 struct flow_dissector_key_ct *mask,
1444 struct netlink_ext_ack *extack)
1445{
1446 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
wenxu1bcc51a2021-02-09 14:37:49 +08001447 int err;
1448
Paul Blakeye0ace682019-07-09 10:30:50 +03001449 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1450 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1451 return -EOPNOTSUPP;
1452 }
1453 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1454 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1455 sizeof(key->ct_state));
wenxu1bcc51a2021-02-09 14:37:49 +08001456
wenxuafa536d2021-03-17 12:02:43 +08001457 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
wenxu1bcc51a2021-02-09 14:37:49 +08001458 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1459 extack);
1460 if (err)
1461 return err;
1462
Paul Blakeye0ace682019-07-09 10:30:50 +03001463 }
1464 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1465 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1466			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1467 return -EOPNOTSUPP;
1468 }
1469 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1470 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1471 sizeof(key->ct_zone));
1472 }
1473 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1474 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1475 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1476 return -EOPNOTSUPP;
1477 }
1478 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1479 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1480 sizeof(key->ct_mark));
1481 }
1482 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1483 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1484 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1485 return -EOPNOTSUPP;
1486 }
1487 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1488 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1489 sizeof(key->ct_labels));
1490 }
1491
1492 return 0;
1493}
1494
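/* Central key parser: translate the TCA_FLOWER_* attributes into the
 * fl_flow_key/mask pair.  Covers indev, Ethernet, (QinQ) VLAN, IPv4/IPv6,
 * L4 ports and port ranges, ICMP, ARP, MPLS, tunnel addresses/ID/options,
 * conntrack state and the flow hash.  Fields left unmasked stay zero in the
 * mask and are simply ignored at match time.  A minimal user of this path
 * would be, for example:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto tcp dst_port 80 action drop
 */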
Jiri Pirko77b99002015-05-12 14:56:21 +02001495static int fl_set_key(struct net *net, struct nlattr **tb,
Alexander Aring1057c552018-01-18 11:20:54 -05001496 struct fl_flow_key *key, struct fl_flow_key *mask,
1497 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001498{
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001499 __be16 ethertype;
Or Gerlitzd9724772016-12-22 14:28:15 +02001500 int ret = 0;
Jiri Pirkoa5148622019-06-15 11:03:49 +02001501
Jiri Pirko77b99002015-05-12 14:56:21 +02001502 if (tb[TCA_FLOWER_INDEV]) {
Alexander Aring1057c552018-01-18 11:20:54 -05001503 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001504 if (err < 0)
1505 return err;
Jiri Pirko8212ed72019-06-19 09:41:03 +03001506 key->meta.ingress_ifindex = err;
1507 mask->meta.ingress_ifindex = 0xffffffff;
Jiri Pirko77b99002015-05-12 14:56:21 +02001508 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001509
1510 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1511 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1512 sizeof(key->eth.dst));
1513 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1514 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1515 sizeof(key->eth.src));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001516
Arnd Bergmann0b498a52016-08-26 17:25:45 +02001517 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001518 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1519
Jianbo Liuaaab0832018-07-06 05:38:13 +00001520 if (eth_type_vlan(ethertype)) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001521 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1522 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1523 &mask->vlan);
1524
Jianbo Liu5e9a0fe2018-07-09 02:26:20 +00001525 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1526 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1527 if (eth_type_vlan(ethertype)) {
1528 fl_set_key_vlan(tb, ethertype,
1529 TCA_FLOWER_KEY_CVLAN_ID,
1530 TCA_FLOWER_KEY_CVLAN_PRIO,
1531 &key->cvlan, &mask->cvlan);
1532 fl_set_key_val(tb, &key->basic.n_proto,
1533 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1534 &mask->basic.n_proto,
1535 TCA_FLOWER_UNSPEC,
1536 sizeof(key->basic.n_proto));
1537 } else {
1538 key->basic.n_proto = ethertype;
1539 mask->basic.n_proto = cpu_to_be16(~0);
1540 }
Jianbo Liud64efd02018-07-06 05:38:16 +00001541 }
Arnd Bergmann0b498a52016-08-26 17:25:45 +02001542 } else {
1543 key->basic.n_proto = ethertype;
1544 mask->basic.n_proto = cpu_to_be16(~0);
1545 }
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001546 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001547
Jiri Pirko77b99002015-05-12 14:56:21 +02001548 if (key->basic.n_proto == htons(ETH_P_IP) ||
1549 key->basic.n_proto == htons(ETH_P_IPV6)) {
1550 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1551 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1552 sizeof(key->basic.ip_proto));
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001553 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
Jiri Pirko77b99002015-05-12 14:56:21 +02001554 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001555
1556 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1557 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001558 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001559 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1560 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1561 sizeof(key->ipv4.src));
1562 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1563 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1564 sizeof(key->ipv4.dst));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001565 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1566 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001567 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001568 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1569 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1570 sizeof(key->ipv6.src));
1571 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1572 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1573 sizeof(key->ipv6.dst));
1574 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001575
Jiri Pirko77b99002015-05-12 14:56:21 +02001576 if (key->basic.ip_proto == IPPROTO_TCP) {
1577 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001578 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001579 sizeof(key->tp.src));
1580 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001581 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001582 sizeof(key->tp.dst));
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001583 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1584 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1585 sizeof(key->tcp.flags));
Jiri Pirko77b99002015-05-12 14:56:21 +02001586 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1587 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001588 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001589 sizeof(key->tp.src));
1590 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001591 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001592 sizeof(key->tp.dst));
Simon Horman5976c5f2016-11-03 13:24:21 +01001593 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1594 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1595 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1596 sizeof(key->tp.src));
1597 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1598 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1599 sizeof(key->tp.dst));
Simon Horman7b684882016-12-07 13:48:28 +01001600 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1601 key->basic.ip_proto == IPPROTO_ICMP) {
1602 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1603 &mask->icmp.type,
1604 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1605 sizeof(key->icmp.type));
1606 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1607 &mask->icmp.code,
1608 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1609 sizeof(key->icmp.code));
1610 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1611 key->basic.ip_proto == IPPROTO_ICMPV6) {
1612 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1613 &mask->icmp.type,
1614 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1615 sizeof(key->icmp.type));
Simon Horman040587a2017-01-30 16:19:02 +01001616 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
Simon Horman7b684882016-12-07 13:48:28 +01001617 &mask->icmp.code,
Simon Horman040587a2017-01-30 16:19:02 +01001618 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
Simon Horman7b684882016-12-07 13:48:28 +01001619 sizeof(key->icmp.code));
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001620 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1621 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
Guillaume Nault442f7302020-03-23 21:48:49 +01001622 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -04001623 if (ret)
1624 return ret;
Simon Horman99d31322017-01-11 14:05:43 +01001625 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1626 key->basic.n_proto == htons(ETH_P_RARP)) {
1627 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1628 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1629 sizeof(key->arp.sip));
1630 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1631 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1632 sizeof(key->arp.tip));
1633 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1634 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1635 sizeof(key->arp.op));
1636 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1637 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1638 sizeof(key->arp.sha));
1639 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1640 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1641 sizeof(key->arp.tha));
Jiri Pirko77b99002015-05-12 14:56:21 +02001642 }
1643
Amritha Nambiar5c722992018-11-12 16:15:55 -08001644 if (key->basic.ip_proto == IPPROTO_TCP ||
1645 key->basic.ip_proto == IPPROTO_UDP ||
1646 key->basic.ip_proto == IPPROTO_SCTP) {
Guillaume Naultbd7d4c12020-03-23 21:48:51 +01001647 ret = fl_set_key_port_range(tb, key, mask, extack);
Amritha Nambiar5c722992018-11-12 16:15:55 -08001648 if (ret)
1649 return ret;
1650 }
1651
Amir Vadaibc3103f2016-09-08 16:23:47 +03001652 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1653 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1654 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001655 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001656 fl_set_key_val(tb, &key->enc_ipv4.src,
1657 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1658 &mask->enc_ipv4.src,
1659 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1660 sizeof(key->enc_ipv4.src));
1661 fl_set_key_val(tb, &key->enc_ipv4.dst,
1662 TCA_FLOWER_KEY_ENC_IPV4_DST,
1663 &mask->enc_ipv4.dst,
1664 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1665 sizeof(key->enc_ipv4.dst));
1666 }
1667
1668 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1669 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1670 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001671 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001672 fl_set_key_val(tb, &key->enc_ipv6.src,
1673 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1674 &mask->enc_ipv6.src,
1675 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1676 sizeof(key->enc_ipv6.src));
1677 fl_set_key_val(tb, &key->enc_ipv6.dst,
1678 TCA_FLOWER_KEY_ENC_IPV6_DST,
1679 &mask->enc_ipv6.dst,
1680 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1681 sizeof(key->enc_ipv6.dst));
1682 }
1683
1684 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03001685 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
Amir Vadaibc3103f2016-09-08 16:23:47 +03001686 sizeof(key->enc_key_id.keyid));
1687
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001688 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1689 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1690 sizeof(key->enc_tp.src));
1691
1692 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1693 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1694 sizeof(key->enc_tp.dst));
1695
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001696 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1697
Ariel Levkovich5923b8f2020-07-23 01:03:01 +03001698 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1699 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1700 sizeof(key->hash.hash));
1701
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001702 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1703 ret = fl_set_enc_opt(tb, key, mask, extack);
1704 if (ret)
1705 return ret;
1706 }
1707
Paul Blakeye0ace682019-07-09 10:30:50 +03001708 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1709 if (ret)
1710 return ret;
1711
Or Gerlitzd9724772016-12-22 14:28:15 +02001712 if (tb[TCA_FLOWER_KEY_FLAGS])
Guillaume Naulte304e212020-03-23 21:48:53 +01001713 ret = fl_set_key_flags(tb, &key->control.flags,
1714 &mask->control.flags, extack);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001715
Or Gerlitzd9724772016-12-22 14:28:15 +02001716 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001717}
1718
Paul Blakey05cd2712018-04-30 14:28:30 +03001719static void fl_mask_copy(struct fl_flow_mask *dst,
1720 struct fl_flow_mask *src)
Jiri Pirko77b99002015-05-12 14:56:21 +02001721{
Paul Blakey05cd2712018-04-30 14:28:30 +03001722 const void *psrc = fl_key_get_start(&src->key, src);
1723 void *pdst = fl_key_get_start(&dst->key, src);
Jiri Pirko77b99002015-05-12 14:56:21 +02001724
Paul Blakey05cd2712018-04-30 14:28:30 +03001725 memcpy(pdst, psrc, fl_mask_range(src));
1726 dst->range = src->range;
Jiri Pirko77b99002015-05-12 14:56:21 +02001727}
1728
1729static const struct rhashtable_params fl_ht_params = {
1730 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1731 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1732 .automatic_shrinking = true,
1733};
1734
Paul Blakey05cd2712018-04-30 14:28:30 +03001735static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001736{
Paul Blakey05cd2712018-04-30 14:28:30 +03001737 mask->filter_ht_params = fl_ht_params;
1738 mask->filter_ht_params.key_len = fl_mask_range(mask);
1739 mask->filter_ht_params.key_offset += mask->range.start;
Jiri Pirko77b99002015-05-12 14:56:21 +02001740
Paul Blakey05cd2712018-04-30 14:28:30 +03001741 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
Jiri Pirko77b99002015-05-12 14:56:21 +02001742}
1743
1744#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
Pankaj Bharadiyac5936422019-12-09 10:31:43 -08001745#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
Jiri Pirko77b99002015-05-12 14:56:21 +02001746
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001747#define FL_KEY_IS_MASKED(mask, member) \
1748 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1749 0, FL_KEY_MEMBER_SIZE(member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001750
1751#define FL_KEY_SET(keys, cnt, id, member) \
1752 do { \
1753 keys[cnt].key_id = id; \
1754 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1755 cnt++; \
1756 } while(0);
1757
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001758#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001759 do { \
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001760 if (FL_KEY_IS_MASKED(mask, member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001761 FL_KEY_SET(keys, cnt, id, member); \
1762 } while(0);
1763
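/* Build the flow dissector for one mask: only key fields with at least one
 * mask bit set are added, so the software classification path dissects no
 * more of the packet than the filters sharing this mask actually require.
 */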
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001764static void fl_init_dissector(struct flow_dissector *dissector,
1765 struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001766{
1767 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1768 size_t cnt = 0;
1769
Jiri Pirko8212ed72019-06-19 09:41:03 +03001770 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1771 FLOW_DISSECTOR_KEY_META, meta);
Tom Herbert42aecaa2015-06-04 09:16:39 -07001772 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
Jiri Pirko77b99002015-05-12 14:56:21 +02001773 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001774 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001775 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001776 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001777 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001778 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001779 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09001780 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1781 FLOW_DISSECTOR_KEY_PORTS, tp);
1782 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1783 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001784 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001785 FLOW_DISSECTOR_KEY_IP, ip);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001786 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001787 FLOW_DISSECTOR_KEY_TCP, tcp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001788 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman7b684882016-12-07 13:48:28 +01001789 FLOW_DISSECTOR_KEY_ICMP, icmp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001790 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman99d31322017-01-11 14:05:43 +01001791 FLOW_DISSECTOR_KEY_ARP, arp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001792 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001793 FLOW_DISSECTOR_KEY_MPLS, mpls);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001794 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001795 FLOW_DISSECTOR_KEY_VLAN, vlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001796 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jianbo Liud64efd02018-07-06 05:38:16 +00001797 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001798 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001799 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001800 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001801 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001802 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001803 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001804 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1805 FL_KEY_IS_MASKED(mask, enc_ipv6))
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001806 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1807 enc_control);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001808 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001809 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001810 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001811 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001812 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1813 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
Paul Blakeye0ace682019-07-09 10:30:50 +03001814 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1815 FLOW_DISSECTOR_KEY_CT, ct);
Ariel Levkovich5923b8f2020-07-23 01:03:01 +03001816 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1817 FLOW_DISSECTOR_KEY_HASH, hash);
Jiri Pirko77b99002015-05-12 14:56:21 +02001818
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001819 skb_flow_dissector_init(dissector, keys, cnt);
Paul Blakey05cd2712018-04-30 14:28:30 +03001820}
1821
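/* Allocate and initialise a new shared mask: copy the mask bits, derive the
 * RANGE flag from any port-range fields, set up the per-mask filter
 * hashtable and dissector, then replace the caller's temporary node in
 * head->ht with the permanent one and link it on head->masks.
 */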
1822static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1823 struct fl_flow_mask *mask)
1824{
1825 struct fl_flow_mask *newmask;
1826 int err;
1827
1828 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1829 if (!newmask)
1830 return ERR_PTR(-ENOMEM);
1831
1832 fl_mask_copy(newmask, mask);
1833
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09001834 if ((newmask->key.tp_range.tp_min.dst &&
1835 newmask->key.tp_range.tp_max.dst) ||
1836 (newmask->key.tp_range.tp_min.src &&
1837 newmask->key.tp_range.tp_max.src))
Amritha Nambiar5c722992018-11-12 16:15:55 -08001838 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1839
Paul Blakey05cd2712018-04-30 14:28:30 +03001840 err = fl_init_mask_hashtable(newmask);
1841 if (err)
1842 goto errout_free;
1843
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001844 fl_init_dissector(&newmask->dissector, &newmask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +03001845
1846 INIT_LIST_HEAD_RCU(&newmask->filters);
1847
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001848 refcount_set(&newmask->refcnt, 1);
Vlad Buslov195c2342019-03-21 15:17:38 +02001849 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1850 &newmask->ht_node, mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001851 if (err)
1852 goto errout_destroy;
1853
Vlad Buslov259e60f2019-03-21 15:17:39 +02001854 spin_lock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001855 list_add_tail_rcu(&newmask->list, &head->masks);
Vlad Buslov259e60f2019-03-21 15:17:39 +02001856 spin_unlock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001857
1858 return newmask;
1859
1860errout_destroy:
1861 rhashtable_destroy(&newmask->ht);
1862errout_free:
1863 kfree(newmask);
1864
1865 return ERR_PTR(err);
Jiri Pirko77b99002015-05-12 14:56:21 +02001866}
1867
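/* Look up or create the shared mask for a new filter.  The caller's
 * temporary mask is first inserted into head->ht so concurrent creators of
 * the same mask either reuse it or back off with -EAGAIN; on success
 * fnew->mask holds a reference to the (possibly pre-existing) mask.
 * Replacing an existing filter with a different mask is rejected.
 */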
1868static int fl_check_assign_mask(struct cls_fl_head *head,
Paul Blakey05cd2712018-04-30 14:28:30 +03001869 struct cls_fl_filter *fnew,
1870 struct cls_fl_filter *fold,
Jiri Pirko77b99002015-05-12 14:56:21 +02001871 struct fl_flow_mask *mask)
1872{
Paul Blakey05cd2712018-04-30 14:28:30 +03001873 struct fl_flow_mask *newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001874 int ret = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001875
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001876 rcu_read_lock();
Vlad Buslov195c2342019-03-21 15:17:38 +02001877
1878 /* Insert mask as temporary node to prevent concurrent creation of mask
1879 * with same key. Any concurrent lookups with same key will return
Vlad Buslov99815f52019-06-13 17:54:04 +03001880 * -EAGAIN because mask's refcnt is zero.
Vlad Buslov195c2342019-03-21 15:17:38 +02001881 */
1882 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1883 &mask->ht_node,
1884 mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001885 if (!fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001886 rcu_read_unlock();
1887
Vlad Buslov195c2342019-03-21 15:17:38 +02001888 if (fold) {
1889 ret = -EINVAL;
1890 goto errout_cleanup;
1891 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001892
1893 newmask = fl_create_new_mask(head, mask);
Vlad Buslov195c2342019-03-21 15:17:38 +02001894 if (IS_ERR(newmask)) {
1895 ret = PTR_ERR(newmask);
1896 goto errout_cleanup;
1897 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001898
1899 fnew->mask = newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001900 return 0;
Vlad Buslov195c2342019-03-21 15:17:38 +02001901 } else if (IS_ERR(fnew->mask)) {
1902 ret = PTR_ERR(fnew->mask);
Paul Blakeyf6521c52018-06-03 10:06:14 +03001903 } else if (fold && fold->mask != fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001904 ret = -EINVAL;
1905 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1906 /* Mask was deleted concurrently, try again */
1907 ret = -EAGAIN;
Jiri Pirko77b99002015-05-12 14:56:21 +02001908 }
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001909 rcu_read_unlock();
1910 return ret;
Vlad Buslov195c2342019-03-21 15:17:38 +02001911
1912errout_cleanup:
1913 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1914 mask_ht_params);
Vlad Buslov195c2342019-03-21 15:17:38 +02001915 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001916}
1917
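/* Validate the actions and classid, parse the match keys, then recompute
 * the mask range and the filter's masked key.  Fails with -EINVAL when the
 * resulting mask does not fit the chain template, if one is attached.
 */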
1918static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1919 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1920 unsigned long base, struct nlattr **tb,
Cong Wang695176b2021-07-29 16:12:14 -07001921 struct nlattr *est,
Baowen Zhengc86e0202021-12-17 19:16:28 +01001922 struct fl_flow_tmplt *tmplt,
1923 u32 flags, u32 fl_flags,
Alexander Aring50a56192018-01-18 11:20:52 -05001924 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001925{
Jiri Pirko77b99002015-05-12 14:56:21 +02001926 int err;
1927
Baowen Zhengc86e0202021-12-17 19:16:28 +01001928 err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
1929 fl_flags, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001930 if (err < 0)
1931 return err;
1932
1933 if (tb[TCA_FLOWER_CLASSID]) {
1934 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
Cong Wang695176b2021-07-29 16:12:14 -07001935 if (flags & TCA_ACT_FLAGS_NO_RTNL)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001936 rtnl_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001937 tcf_bind_filter(tp, &f->res, base);
Cong Wang695176b2021-07-29 16:12:14 -07001938 if (flags & TCA_ACT_FLAGS_NO_RTNL)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001939 rtnl_unlock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001940 }
1941
Alexander Aring1057c552018-01-18 11:20:54 -05001942 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001943 if (err)
Jiri Pirko45507522017-08-04 14:29:06 +02001944 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001945
1946 fl_mask_update_range(mask);
1947 fl_set_masked_key(&f->mkey, &f->key, mask);
1948
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001949 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1950 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1951 return -EINVAL;
1952 }
1953
Jiri Pirko77b99002015-05-12 14:56:21 +02001954 return 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001955}
1956
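/* Try to insert the new filter into its mask's hashtable.  A duplicate key
 * is tolerated only when an existing filter is being overwritten (fold is
 * set); in that case insertion is retried later under tp->lock.  *in_ht
 * tells the caller whether the node actually made it into the table.
 */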
Vlad Buslov1f17f772019-04-05 20:56:26 +03001957static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1958 struct cls_fl_filter *fold,
1959 bool *in_ht)
1960{
1961 struct fl_flow_mask *mask = fnew->mask;
1962 int err;
1963
Vlad Buslov9e355522019-04-11 19:12:20 +03001964 err = rhashtable_lookup_insert_fast(&mask->ht,
1965 &fnew->ht_node,
1966 mask->filter_ht_params);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001967 if (err) {
1968 *in_ht = false;
1969 /* It is okay if filter with same key exists when
1970 * overwriting.
1971 */
1972 return fold && err == -EEXIST ? 0 : err;
1973 }
1974
1975 *in_ht = true;
1976 return 0;
1977}
1978
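/* Create a new flower filter or replace an existing one.  The expensive
 * work (netlink parsing, mask lookup, hashtable insertion, hardware
 * offload) runs without tp->lock; the lock is taken only at the end to
 * atomically swap the old filter for the new one or publish the new
 * handle, returning -EAGAIN on the concurrent-update races the unlocked
 * phase can lose.
 */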
Jiri Pirko77b99002015-05-12 14:56:21 +02001979static int fl_change(struct net *net, struct sk_buff *in_skb,
1980 struct tcf_proto *tp, unsigned long base,
1981 u32 handle, struct nlattr **tca,
Cong Wang695176b2021-07-29 16:12:14 -07001982 void **arg, u32 flags,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001983 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001984{
Vlad Buslove4746192019-03-21 15:17:33 +02001985 struct cls_fl_head *head = fl_head_dereference(tp);
Cong Wang695176b2021-07-29 16:12:14 -07001986 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
WANG Cong8113c092017-08-04 21:31:43 -07001987 struct cls_fl_filter *fold = *arg;
Jiri Pirko77b99002015-05-12 14:56:21 +02001988 struct cls_fl_filter *fnew;
Ivan Vecera2cddd202019-01-16 16:53:52 +01001989 struct fl_flow_mask *mask;
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001990 struct nlattr **tb;
Vlad Buslov1f17f772019-04-05 20:56:26 +03001991 bool in_ht;
Jiri Pirko77b99002015-05-12 14:56:21 +02001992 int err;
1993
Vlad Buslov06177552019-03-21 15:17:35 +02001994 if (!tca[TCA_OPTIONS]) {
1995 err = -EINVAL;
1996 goto errout_fold;
1997 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001998
Ivan Vecera2cddd202019-01-16 16:53:52 +01001999 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
Vlad Buslov06177552019-03-21 15:17:35 +02002000 if (!mask) {
2001 err = -ENOBUFS;
2002 goto errout_fold;
2003 }
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002004
Ivan Vecera2cddd202019-01-16 16:53:52 +01002005 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2006 if (!tb) {
2007 err = -ENOBUFS;
2008 goto errout_mask_alloc;
2009 }
2010
Johannes Berg8cb08172019-04-26 14:07:28 +02002011 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2012 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02002013 if (err < 0)
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002014 goto errout_tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02002015
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002016 if (fold && handle && fold->handle != handle) {
2017 err = -EINVAL;
2018 goto errout_tb;
2019 }
Jiri Pirko77b99002015-05-12 14:56:21 +02002020
2021 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002022 if (!fnew) {
2023 err = -ENOBUFS;
2024 goto errout_tb;
2025 }
Vlad Buslovc049d562019-04-24 09:53:31 +03002026 INIT_LIST_HEAD(&fnew->hw_list);
Vlad Buslov06177552019-03-21 15:17:35 +02002027 refcount_set(&fnew->refcnt, 1);
Jiri Pirko77b99002015-05-12 14:56:21 +02002028
Cong Wang14215102019-02-20 21:37:42 -08002029 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
WANG Congb9a24bb2016-08-19 12:36:54 -07002030 if (err < 0)
2031 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +02002032
Vlad Buslovecb3dea2019-03-06 16:22:12 +02002033 if (tb[TCA_FLOWER_FLAGS]) {
2034 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2035
2036 if (!tc_flags_valid(fnew->flags)) {
2037 err = -EINVAL;
2038 goto errout;
2039 }
2040 }
2041
Cong Wang695176b2021-07-29 16:12:14 -07002042 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
Baowen Zhengc86e0202021-12-17 19:16:28 +01002043 tp->chain->tmplt_priv, flags, fnew->flags,
2044 extack);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02002045 if (err)
2046 goto errout;
2047
2048 err = fl_check_assign_mask(head, fnew, fold, mask);
2049 if (err)
2050 goto errout;
2051
Vlad Buslov1f17f772019-04-05 20:56:26 +03002052 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2053 if (err)
2054 goto errout_mask;
2055
Hadar Hen Zion79685212016-12-01 14:06:34 +02002056 if (!tc_skip_hw(fnew->flags)) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002057 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
Hadar Hen Zion79685212016-12-01 14:06:34 +02002058 if (err)
Vlad Buslov1f17f772019-04-05 20:56:26 +03002059 goto errout_ht;
Hadar Hen Zion79685212016-12-01 14:06:34 +02002060 }
Amir Vadai5b33f482016-03-08 12:42:29 +02002061
Or Gerlitz55593962017-02-16 10:31:13 +02002062 if (!tc_in_hw(fnew->flags))
2063 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2064
Vlad Buslov3d81e712019-03-21 15:17:42 +02002065 spin_lock(&tp->lock);
2066
Vlad Buslov272ffaa2019-03-21 15:17:41 +02002067 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2068 * proto again or create new one, if necessary.
2069 */
2070 if (tp->deleting) {
2071 err = -EAGAIN;
2072 goto errout_hw;
2073 }
2074
Amir Vadai5b33f482016-03-08 12:42:29 +02002075 if (fold) {
Vlad Buslovb2552b82019-03-21 15:17:36 +02002076 /* Fold filter was deleted concurrently. Retry lookup. */
2077 if (fold->deleted) {
2078 err = -EAGAIN;
2079 goto errout_hw;
2080 }
2081
Vlad Buslov620da482019-03-21 15:17:34 +02002082 fnew->handle = handle;
2083
Vlad Buslov1f17f772019-04-05 20:56:26 +03002084 if (!in_ht) {
2085 struct rhashtable_params params =
2086 fnew->mask->filter_ht_params;
2087
2088 err = rhashtable_insert_fast(&fnew->mask->ht,
2089 &fnew->ht_node,
2090 params);
2091 if (err)
2092 goto errout_hw;
2093 in_ht = true;
2094 }
Vlad Buslov620da482019-03-21 15:17:34 +02002095
Vlad Buslovc049d562019-04-24 09:53:31 +03002096 refcount_inc(&fnew->refcnt);
Roi Dayan599d2572018-12-19 18:07:56 +02002097 rhashtable_remove_fast(&fold->mask->ht,
2098 &fold->ht_node,
2099 fold->mask->filter_ht_params);
Matthew Wilcox234a4622017-11-28 09:56:36 -05002100 idr_replace(&head->handle_idr, fnew, fnew->handle);
Daniel Borkmannff3532f2015-07-17 22:38:44 +02002101 list_replace_rcu(&fold->list, &fnew->list);
Vlad Buslovb2552b82019-03-21 15:17:36 +02002102 fold->deleted = true;
Vlad Buslov620da482019-03-21 15:17:34 +02002103
Vlad Buslov3d81e712019-03-21 15:17:42 +02002104 spin_unlock(&tp->lock);
2105
Vlad Buslov99946772019-04-12 00:54:19 +03002106 fl_mask_put(head, fold->mask);
Vlad Buslov620da482019-03-21 15:17:34 +02002107 if (!tc_skip_hw(fold->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002108 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02002109 tcf_unbind_filter(tp, &fold->res);
Vlad Buslov06177552019-03-21 15:17:35 +02002110 /* Caller holds reference to fold, so refcnt is always > 0
2111 * after this.
2112 */
2113 refcount_dec(&fold->refcnt);
2114 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02002115 } else {
Vlad Buslov620da482019-03-21 15:17:34 +02002116 if (handle) {
2117			/* user specified a handle that does not exist yet */
2118 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2119 handle, GFP_ATOMIC);
Vlad Buslov9a2d9382019-03-21 15:17:40 +02002120
2121 /* Filter with specified handle was concurrently
2122 * inserted after initial check in cls_api. This is not
2123 * necessarily an error if NLM_F_EXCL is not set in
2124 * message flags. Returning EAGAIN will cause cls_api to
2125 * try to update concurrently inserted rule.
2126 */
2127 if (err == -ENOSPC)
2128 err = -EAGAIN;
Vlad Buslov620da482019-03-21 15:17:34 +02002129 } else {
2130 handle = 1;
2131 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2132 INT_MAX, GFP_ATOMIC);
2133 }
2134 if (err)
2135 goto errout_hw;
2136
Vlad Buslovc049d562019-04-24 09:53:31 +03002137 refcount_inc(&fnew->refcnt);
Vlad Buslov620da482019-03-21 15:17:34 +02002138 fnew->handle = handle;
Paul Blakey05cd2712018-04-30 14:28:30 +03002139 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
Vlad Buslov3d81e712019-03-21 15:17:42 +02002140 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02002141 }
2142
Vlad Buslov620da482019-03-21 15:17:34 +02002143 *arg = fnew;
2144
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002145 kfree(tb);
Vlad Buslov99815f52019-06-13 17:54:04 +03002146 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
Jiri Pirko77b99002015-05-12 14:56:21 +02002147 return 0;
2148
Vlad Buslovc049d562019-04-24 09:53:31 +03002149errout_ht:
2150 spin_lock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02002151errout_hw:
Vlad Buslovc049d562019-04-24 09:53:31 +03002152 fnew->deleted = true;
Vlad Buslov3d81e712019-03-21 15:17:42 +02002153 spin_unlock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02002154 if (!tc_skip_hw(fnew->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002155 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
Vlad Buslov1f17f772019-04-05 20:56:26 +03002156 if (in_ht)
2157 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2158 fnew->mask->filter_ht_params);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02002159errout_mask:
Vlad Buslov99946772019-04-12 00:54:19 +03002160 fl_mask_put(head, fnew->mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02002161errout:
Vlad Buslovc049d562019-04-24 09:53:31 +03002162 __fl_put(fnew);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01002163errout_tb:
2164 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01002165errout_mask_alloc:
Vlad Buslov99815f52019-06-13 17:54:04 +03002166 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
Vlad Buslov06177552019-03-21 15:17:35 +02002167errout_fold:
2168 if (fold)
2169 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02002170 return err;
2171}
2172
Alexander Aring571acf22018-01-18 11:20:53 -05002173static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002174 bool rtnl_held, struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02002175{
Vlad Buslove4746192019-03-21 15:17:33 +02002176 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07002177 struct cls_fl_filter *f = arg;
Vlad Buslovb2552b82019-03-21 15:17:36 +02002178 bool last_on_mask;
2179 int err = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02002180
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002181 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
Paul Blakey05cd2712018-04-30 14:28:30 +03002182 *last = list_empty(&head->masks);
Vlad Buslov06177552019-03-21 15:17:35 +02002183 __fl_put(f);
2184
Vlad Buslovb2552b82019-03-21 15:17:36 +02002185 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02002186}
2187
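/* Walk all filters for a dump.  The IDR is traversed under RCU, a temporary
 * reference is taken on each filter so the callback can run outside the
 * read-side critical section, and filters whose refcount already dropped to
 * zero (concurrently deleted) are skipped.
 */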
Vlad Buslov12db03b2019-02-11 10:55:45 +02002188static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2189 bool rtnl_held)
Jiri Pirko77b99002015-05-12 14:56:21 +02002190{
Cong Wangd39d7142019-06-28 11:03:42 -07002191 struct cls_fl_head *head = fl_head_dereference(tp);
2192 unsigned long id = arg->cookie, tmp;
Jiri Pirko77b99002015-05-12 14:56:21 +02002193 struct cls_fl_filter *f;
2194
Vlad Buslov01683a12018-07-09 13:29:11 +03002195 arg->count = arg->skip;
2196
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002197 rcu_read_lock();
Cong Wangd39d7142019-06-28 11:03:42 -07002198 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2199 /* don't return filters that are being deleted */
2200 if (!refcount_inc_not_zero(&f->refcnt))
2201 continue;
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002202 rcu_read_unlock();
2203
Vlad Buslov01683a12018-07-09 13:29:11 +03002204 if (arg->fn(tp, f, arg) < 0) {
Vlad Buslov06177552019-03-21 15:17:35 +02002205 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03002206 arg->stop = 1;
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002207 rcu_read_lock();
Vlad Buslov01683a12018-07-09 13:29:11 +03002208 break;
Paul Blakey05cd2712018-04-30 14:28:30 +03002209 }
Vlad Buslov06177552019-03-21 15:17:35 +02002210 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03002211 arg->count++;
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002212 rcu_read_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02002213 }
Vlad Buslovd5ef1902021-09-29 18:08:49 +03002214 rcu_read_unlock();
Cong Wangd39d7142019-06-28 11:03:42 -07002215 arg->cookie = id;
Jiri Pirko77b99002015-05-12 14:56:21 +02002216}
2217
Vlad Buslovc049d562019-04-24 09:53:31 +03002218static struct cls_fl_filter *
2219fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2220{
2221 struct cls_fl_head *head = fl_head_dereference(tp);
2222
2223 spin_lock(&tp->lock);
2224 if (list_empty(&head->hw_filters)) {
2225 spin_unlock(&tp->lock);
2226 return NULL;
2227 }
2228
2229 if (!f)
2230 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2231 hw_list);
2232 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2233 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2234 spin_unlock(&tp->lock);
2235 return f;
2236 }
2237 }
2238
2239 spin_unlock(&tp->lock);
2240 return NULL;
2241}
2242
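/* Replay all hardware-relevant filters against a newly added or removed
 * offload callback.  Runs under RTNL, so the hw_filters list is stable
 * during the walk; a filter whose actions cannot be translated is skipped
 * unless it is skip_sw, while any callback error aborts the whole walk.
 */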
Pablo Neira Ayusoa7323312019-07-19 18:20:15 +02002243static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
John Hurley31533cb2018-06-25 14:30:06 -07002244 void *cb_priv, struct netlink_ext_ack *extack)
2245{
John Hurley31533cb2018-06-25 14:30:06 -07002246 struct tcf_block *block = tp->chain->block;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002247 struct flow_cls_offload cls_flower = {};
Vlad Buslovc049d562019-04-24 09:53:31 +03002248 struct cls_fl_filter *f = NULL;
John Hurley31533cb2018-06-25 14:30:06 -07002249 int err;
2250
Vlad Buslovc049d562019-04-24 09:53:31 +03002251 /* hw_filters list can only be changed by hw offload functions after
2252 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2253 * iterating it.
2254 */
2255 ASSERT_RTNL();
John Hurley31533cb2018-06-25 14:30:06 -07002256
Vlad Buslovc049d562019-04-24 09:53:31 +03002257 while ((f = fl_get_next_hw_filter(tp, f, add))) {
John Hurley95e27a42019-04-02 23:53:20 +01002258 cls_flower.rule =
2259 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2260 if (!cls_flower.rule) {
2261 __fl_put(f);
2262 return -ENOMEM;
John Hurley31533cb2018-06-25 14:30:06 -07002263 }
John Hurley95e27a42019-04-02 23:53:20 +01002264
2265 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
Pieter Jansen van Vuurend6787142019-05-06 17:24:21 -07002266 extack);
John Hurley95e27a42019-04-02 23:53:20 +01002267 cls_flower.command = add ?
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002268 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
John Hurley95e27a42019-04-02 23:53:20 +01002269 cls_flower.cookie = (unsigned long)f;
2270 cls_flower.rule->match.dissector = &f->mask->dissector;
2271 cls_flower.rule->match.mask = &f->mask->key;
2272 cls_flower.rule->match.key = &f->mkey;
2273
Baowen Zheng9c1c0e12021-12-17 19:16:20 +01002274 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
John Hurley95e27a42019-04-02 23:53:20 +01002275 if (err) {
2276 kfree(cls_flower.rule);
2277 if (tc_skip_sw(f->flags)) {
2278 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2279 __fl_put(f);
2280 return err;
2281 }
2282 goto next_flow;
2283 }
2284
2285 cls_flower.classid = f->res.classid;
2286
Vlad Buslov40119212019-08-26 16:44:59 +03002287 err = tc_setup_cb_reoffload(block, tp, add, cb,
2288 TC_SETUP_CLSFLOWER, &cls_flower,
2289 cb_priv, &f->flags,
2290 &f->in_hw_count);
Baowen Zheng9c1c0e12021-12-17 19:16:20 +01002291 tc_cleanup_offload_action(&cls_flower.rule->action);
John Hurley95e27a42019-04-02 23:53:20 +01002292 kfree(cls_flower.rule);
2293
2294 if (err) {
Vlad Buslov40119212019-08-26 16:44:59 +03002295 __fl_put(f);
2296 return err;
John Hurley95e27a42019-04-02 23:53:20 +01002297 }
John Hurley95e27a42019-04-02 23:53:20 +01002298next_flow:
John Hurley95e27a42019-04-02 23:53:20 +01002299 __fl_put(f);
John Hurley31533cb2018-06-25 14:30:06 -07002300 }
2301
2302 return 0;
2303}
2304
Vlad Buslova449a3e2019-08-26 16:45:00 +03002305static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2306{
2307 struct flow_cls_offload *cls_flower = type_data;
2308 struct cls_fl_filter *f =
2309 (struct cls_fl_filter *) cls_flower->cookie;
2310 struct cls_fl_head *head = fl_head_dereference(tp);
2311
2312 spin_lock(&tp->lock);
2313 list_add(&f->hw_list, &head->hw_filters);
2314 spin_unlock(&tp->lock);
2315}
2316
2317static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2318{
2319 struct flow_cls_offload *cls_flower = type_data;
2320 struct cls_fl_filter *f =
2321 (struct cls_fl_filter *) cls_flower->cookie;
2322
2323 spin_lock(&tp->lock);
2324 if (!list_empty(&f->hw_list))
2325 list_del_init(&f->hw_list);
2326 spin_unlock(&tp->lock);
2327}
2328
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002329static int fl_hw_create_tmplt(struct tcf_chain *chain,
2330 struct fl_flow_tmplt *tmplt)
Jiri Pirko34738452018-07-23 09:23:11 +02002331{
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002332 struct flow_cls_offload cls_flower = {};
Jiri Pirko34738452018-07-23 09:23:11 +02002333 struct tcf_block *block = chain->block;
Jiri Pirko34738452018-07-23 09:23:11 +02002334
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01002335 cls_flower.rule = flow_rule_alloc(0);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002336 if (!cls_flower.rule)
2337 return -ENOMEM;
2338
Jiri Pirko34738452018-07-23 09:23:11 +02002339 cls_flower.common.chain_index = chain->index;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002340 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
Jiri Pirko34738452018-07-23 09:23:11 +02002341 cls_flower.cookie = (unsigned long) tmplt;
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002342 cls_flower.rule->match.dissector = &tmplt->dissector;
2343 cls_flower.rule->match.mask = &tmplt->mask;
2344 cls_flower.rule->match.key = &tmplt->dummy_key;
Jiri Pirko34738452018-07-23 09:23:11 +02002345
2346	/* We don't care if any of the drivers fail to handle this
2347	 * call. It serves only as a hint for them.
2348 */
Vlad Buslov40119212019-08-26 16:44:59 +03002349 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002350 kfree(cls_flower.rule);
2351
2352 return 0;
Jiri Pirko34738452018-07-23 09:23:11 +02002353}
2354
2355static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2356 struct fl_flow_tmplt *tmplt)
2357{
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002358 struct flow_cls_offload cls_flower = {};
Jiri Pirko34738452018-07-23 09:23:11 +02002359 struct tcf_block *block = chain->block;
2360
2361 cls_flower.common.chain_index = chain->index;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002362 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
Jiri Pirko34738452018-07-23 09:23:11 +02002363 cls_flower.cookie = (unsigned long) tmplt;
2364
Vlad Buslov40119212019-08-26 16:44:59 +03002365 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
Jiri Pirko34738452018-07-23 09:23:11 +02002366}
2367
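/* Parse a chain template: the TCA_FLOWER_* attributes populate a dummy key
 * and mask that later constrain every filter added to the chain (see
 * fl_mask_fits_tmplt) and are offered to drivers as a sizing hint via
 * FLOW_CLS_TMPLT_CREATE.
 */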
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002368static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2369 struct nlattr **tca,
2370 struct netlink_ext_ack *extack)
2371{
2372 struct fl_flow_tmplt *tmplt;
2373 struct nlattr **tb;
2374 int err;
2375
2376 if (!tca[TCA_OPTIONS])
2377 return ERR_PTR(-EINVAL);
2378
2379 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2380 if (!tb)
2381 return ERR_PTR(-ENOBUFS);
Johannes Berg8cb08172019-04-26 14:07:28 +02002382 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2383 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002384 if (err)
2385 goto errout_tb;
2386
2387 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03002388 if (!tmplt) {
2389 err = -ENOMEM;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002390 goto errout_tb;
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03002391 }
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002392 tmplt->chain = chain;
2393 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2394 if (err)
2395 goto errout_tmplt;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002396
2397 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2398
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002399 err = fl_hw_create_tmplt(chain, tmplt);
2400 if (err)
2401 goto errout_tmplt;
Jiri Pirko34738452018-07-23 09:23:11 +02002402
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002403 kfree(tb);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002404 return tmplt;
2405
2406errout_tmplt:
2407 kfree(tmplt);
2408errout_tb:
2409 kfree(tb);
2410 return ERR_PTR(err);
2411}
2412
2413static void fl_tmplt_destroy(void *tmplt_priv)
2414{
2415 struct fl_flow_tmplt *tmplt = tmplt_priv;
2416
Cong Wang95278dd2018-10-02 12:50:19 -07002417 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2418 kfree(tmplt);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002419}
2420
Jiri Pirko77b99002015-05-12 14:56:21 +02002421static int fl_dump_key_val(struct sk_buff *skb,
2422 void *val, int val_type,
2423 void *mask, int mask_type, int len)
2424{
2425 int err;
2426
2427 if (!memchr_inv(mask, 0, len))
2428 return 0;
2429 err = nla_put(skb, val_type, len, val);
2430 if (err)
2431 return err;
2432 if (mask_type != TCA_FLOWER_UNSPEC) {
2433 err = nla_put(skb, mask_type, len, mask);
2434 if (err)
2435 return err;
2436 }
2437 return 0;
2438}
2439
Amritha Nambiar5c722992018-11-12 16:15:55 -08002440static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2441 struct fl_flow_key *mask)
2442{
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09002443 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2444 TCA_FLOWER_KEY_PORT_DST_MIN,
2445 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2446 sizeof(key->tp_range.tp_min.dst)) ||
2447 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2448 TCA_FLOWER_KEY_PORT_DST_MAX,
2449 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2450 sizeof(key->tp_range.tp_max.dst)) ||
2451 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2452 TCA_FLOWER_KEY_PORT_SRC_MIN,
2453 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2454 sizeof(key->tp_range.tp_min.src)) ||
2455 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2456 TCA_FLOWER_KEY_PORT_SRC_MAX,
2457 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2458 sizeof(key->tp_range.tp_max.src)))
Amritha Nambiar5c722992018-11-12 16:15:55 -08002459 return -1;
2460
2461 return 0;
2462}
2463
static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
				    struct flow_dissector_key_mpls *mpls_key,
				    struct flow_dissector_key_mpls *mpls_mask,
				    u8 lse_index)
{
	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
	int err;

	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
			 lse_index + 1);
	if (err)
		return err;

	if (lse_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
				 lse_key->mpls_ttl);
		if (err)
			return err;
	}
	if (lse_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
				 lse_key->mpls_bos);
		if (err)
			return err;
	}
	if (lse_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
				 lse_key->mpls_tc);
		if (err)
			return err;
	}
	if (lse_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
				  lse_key->mpls_label);
		if (err)
			return err;
	}

	return 0;
}

static int fl_dump_key_mpls_opts(struct sk_buff *skb,
				 struct flow_dissector_key_mpls *mpls_key,
				 struct flow_dissector_key_mpls *mpls_mask)
{
	struct nlattr *opts;
	struct nlattr *lse;
	u8 lse_index;
	int err;

	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
	if (!opts)
		return -EMSGSIZE;

	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
		if (!(mpls_mask->used_lses & 1 << lse_index))
			continue;

		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
		if (!lse) {
			err = -EMSGSIZE;
			goto err_opts;
		}

		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
					       lse_index);
		if (err)
			goto err_opts_lse;
		nla_nest_end(skb, lse);
	}
	nla_nest_end(skb, opts);

	return 0;

err_opts_lse:
	nla_nest_cancel(skb, lse);
err_opts:
	nla_nest_cancel(skb, opts);

	return err;
}

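/* Dump the MPLS match. A single-LSE match on the traditional fields keeps
 * using the old flat attributes; anything else goes through the nested
 * TCA_FLOWER_KEY_MPLS_OPTS representation.
 */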
static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_key;
	int err;

	if (!mpls_mask->used_lses)
		return 0;

	lse_mask = &mpls_mask->ls[0];
	lse_key = &mpls_key->ls[0];

	/* For backward compatibility, don't use the MPLS nested attributes if
	 * the rule can be expressed using the old attributes.
	 */
	if (mpls_mask->used_lses & ~1 ||
	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);

	if (lse_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 lse_key->mpls_ttl);
		if (err)
			return err;
	}
	if (lse_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 lse_key->mpls_tc);
		if (err)
			return err;
	}
	if (lse_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  lse_key->mpls_label);
		if (err)
			return err;
	}
	if (lse_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 lse_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

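/* Translate a single flow dissector flag into the corresponding TCA_FLOWER
 * flag bit in the key and mask words dumped by fl_dump_key_flags().
 */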
static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

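/* Walk the raw geneve_opt records stored in enc_opts->data and dump each one
 * as class/type/data attributes inside a GENEVE options nest.
 */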
static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
				 struct flow_dissector_key_enc_opts *enc_opts)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct vxlan_metadata *)&enc_opts->data[0];
	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_erspan_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct erspan_metadata *)&enc_opts->data[0];
	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto nla_put_failure;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto nla_put_failure;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

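/* Dump the conntrack state, zone, mark and labels keys; each is emitted only
 * when the corresponding conntrack feature is compiled in.
 */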
static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

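/* Dump one TCA_FLOWER_KEY_ENC_OPTS or TCA_FLOWER_KEY_ENC_OPTS_MASK nest,
 * dispatching on the tunnel option type recorded at parse time.
 */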
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_VXLAN_OPT:
		err = fl_dump_key_vxlan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_ERSPAN_OPT:
		err = fl_dump_key_erspan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

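/* Dump every masked key of a filter (or of a template) back to user space.
 * This helper is shared by fl_dump() and fl_tmplt_dump().
 */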
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			    sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			    sizeof(key->enc_ipv6.src)) ||
		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
			    TCA_FLOWER_KEY_ENC_IPV6_DST,
			    &mask->enc_ipv6.dst,
			    TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			    sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
			    &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
			    sizeof(key->hash.hash)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

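/* Dump a single filter. The classid, key and flags are emitted under
 * tp->lock; hardware stats are refreshed before the actions are dumped.
 */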
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

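/* Terse dump: skip the match keys and emit only the handle, the flags and a
 * terse version of the actions.
 */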
static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	skip_hw = tc_skip_hw(f->flags);

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (tcf_exts_terse_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static bool fl_delete_empty(struct tcf_proto *tp)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	tp->deleting = idr_is_empty(&head->handle_idr);
	spin_unlock(&tp->lock);

	return tp->deleting;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.delete_empty	= fl_delete_empty,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.terse_dump	= fl_terse_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");