// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

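/* Track the smallest byte range of the mask key that contains all non-zero
 * bytes, rounded out to long boundaries, so masked key comparisons and copies
 * can be done long-at-a-time over just that range.
 */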
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_max.dst);
	min_val = htons(filter->key.tp_min.dst);
	max_val = htons(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp.dst) < min_val ||
		    htons(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.src);
	max_mask = htons(filter->mask->key.tp_max.src);
	min_val = htons(filter->key.tp_min.src);
	max_val = htons(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp.src) < min_val ||
		    htons(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

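/* Lookup for masks flagged TCA_FLOWER_MASK_FLAGS_RANGE: walk the filters on
 * this mask, fold each filter's port min/max into the masked key (the skb
 * itself carries no range), then retry the hash lookup.
 */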
static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

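/* Classification walks every mask in the head: the skb is dissected into a
 * flow key, masked, and looked up in that mask's hash table; the first
 * matching filter that is not skip_sw has its actions executed.
 */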
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case an unknown
		 * protocol, so do it rather here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

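/* Masks are freed from workqueue context after an RCU grace period; the
 * mask_init_done flag distinguishes fully initialized masks from temporary
 * ones whose filters list and hash table were never set up.
 */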
static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

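/* Hardware offload removal: build a FLOW_CLS_DESTROY command and pass it to
 * the block callbacks so drivers can remove the filter from hardware.
 */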
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);

}

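/* Offload (or re-offload) a filter: translate the match and actions into a
 * flow_rule and hand it to the block callbacks. For skip_sw filters any
 * failure is fatal; otherwise the filter falls back to software.
 */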
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
				   rtnl_held);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);
}

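/* Drop one filter reference. The final put destroys the filter, going through
 * the workqueue when tcf_exts_get_net() succeeds and synchronously otherwise.
 */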
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

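/* Unlink a filter under tp->lock (hash table, handle IDR, mask filter list),
 * then release its mask, hardware state and reference. *last is set when the
 * mask was dropped as well, i.e. this was the last filter using it.
 */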
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

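/* Netlink attribute policies for the TCA_FLOWER_* match keys and masks. */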
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

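/* Copy a value and its mask out of the netlink attributes; when no mask
 * attribute is given the mask defaults to all ones (exact match).
 */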
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

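/* Parse min/max port attributes for range matching and reject ranges where
 * max is not strictly greater than min.
 */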
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
	    (mask->tp_min.src && mask->tp_max.src &&
	     htons(key->tp_max.src) <= htons(key->tp_min.src)))
		return -EINVAL;

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

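/* Parsers for tunnel encapsulation options. Each helper appends one option to
 * key->enc_opts.data and returns the number of bytes consumed, or a negative
 * errno. When invoked for the mask with depth == 0 (no mask attribute was
 * supplied) the option is left all-ones, i.e. an exact match.
 */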
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

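/* Walk the TCA_FLOWER_KEY_ENC_OPTS nest and its optional mask in lockstep,
 * dispatching to the geneve/vxlan/erspan parsers and checking that key and
 * mask option lengths stay in sync.
 */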
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

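/* Parse conntrack match keys; each key is rejected with -EOPNOTSUPP when the
 * corresponding conntrack feature is not compiled in.
 */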
static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

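/* Translate the TCA_FLOWER_* netlink attributes into the flow key and mask
 * used for matching, starting with the L2 ethertype/VLAN handling and the
 * per-protocol keys below.
 */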
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1348 key->basic.ip_proto == IPPROTO_ICMPV6) {
1349 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1350 &mask->icmp.type,
1351 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1352 sizeof(key->icmp.type));
Simon Horman040587a2017-01-30 16:19:02 +01001353 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
Simon Horman7b684882016-12-07 13:48:28 +01001354 &mask->icmp.code,
Simon Horman040587a2017-01-30 16:19:02 +01001355 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
Simon Horman7b684882016-12-07 13:48:28 +01001356 sizeof(key->icmp.code));
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001357 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1358 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -04001359 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1360 if (ret)
1361 return ret;
Simon Horman99d31322017-01-11 14:05:43 +01001362 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1363 key->basic.n_proto == htons(ETH_P_RARP)) {
1364 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1365 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1366 sizeof(key->arp.sip));
1367 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1368 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1369 sizeof(key->arp.tip));
1370 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1371 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1372 sizeof(key->arp.op));
1373 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1374 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1375 sizeof(key->arp.sha));
1376 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1377 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1378 sizeof(key->arp.tha));
Jiri Pirko77b99002015-05-12 14:56:21 +02001379 }
1380
Amritha Nambiar5c722992018-11-12 16:15:55 -08001381 if (key->basic.ip_proto == IPPROTO_TCP ||
1382 key->basic.ip_proto == IPPROTO_UDP ||
1383 key->basic.ip_proto == IPPROTO_SCTP) {
1384 ret = fl_set_key_port_range(tb, key, mask);
1385 if (ret)
1386 return ret;
1387 }
1388
Amir Vadaibc3103f2016-09-08 16:23:47 +03001389 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1390 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1391 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001392 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001393 fl_set_key_val(tb, &key->enc_ipv4.src,
1394 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1395 &mask->enc_ipv4.src,
1396 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1397 sizeof(key->enc_ipv4.src));
1398 fl_set_key_val(tb, &key->enc_ipv4.dst,
1399 TCA_FLOWER_KEY_ENC_IPV4_DST,
1400 &mask->enc_ipv4.dst,
1401 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1402 sizeof(key->enc_ipv4.dst));
1403 }
1404
1405 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1406 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1407 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001408 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001409 fl_set_key_val(tb, &key->enc_ipv6.src,
1410 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1411 &mask->enc_ipv6.src,
1412 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1413 sizeof(key->enc_ipv6.src));
1414 fl_set_key_val(tb, &key->enc_ipv6.dst,
1415 TCA_FLOWER_KEY_ENC_IPV6_DST,
1416 &mask->enc_ipv6.dst,
1417 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1418 sizeof(key->enc_ipv6.dst));
1419 }
1420
1421 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03001422 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
Amir Vadaibc3103f2016-09-08 16:23:47 +03001423 sizeof(key->enc_key_id.keyid));
1424
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001425 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1426 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1427 sizeof(key->enc_tp.src));
1428
1429 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1430 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1431 sizeof(key->enc_tp.dst));
1432
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001433 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1434
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001435 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1436 ret = fl_set_enc_opt(tb, key, mask, extack);
1437 if (ret)
1438 return ret;
1439 }
1440
Paul Blakeye0ace682019-07-09 10:30:50 +03001441 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1442 if (ret)
1443 return ret;
1444
Or Gerlitzd9724772016-12-22 14:28:15 +02001445 if (tb[TCA_FLOWER_KEY_FLAGS])
1446 ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001447
Or Gerlitzd9724772016-12-22 14:28:15 +02001448 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001449}
1450
Paul Blakey05cd2712018-04-30 14:28:30 +03001451static void fl_mask_copy(struct fl_flow_mask *dst,
1452 struct fl_flow_mask *src)
Jiri Pirko77b99002015-05-12 14:56:21 +02001453{
Paul Blakey05cd2712018-04-30 14:28:30 +03001454 const void *psrc = fl_key_get_start(&src->key, src);
1455 void *pdst = fl_key_get_start(&dst->key, src);
Jiri Pirko77b99002015-05-12 14:56:21 +02001456
Paul Blakey05cd2712018-04-30 14:28:30 +03001457 memcpy(pdst, psrc, fl_mask_range(src));
1458 dst->range = src->range;
Jiri Pirko77b99002015-05-12 14:56:21 +02001459}
1460
1461static const struct rhashtable_params fl_ht_params = {
1462 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1463 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1464 .automatic_shrinking = true,
1465};
1466
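/* Each fl_flow_mask owns a hashtable of the filters that use it.
 * fl_init_mask_hashtable() starts from fl_ht_params above and narrows
 * key_offset/key_len so that only the bytes of the masked key
 * (cls_fl_filter::mkey) that fall inside mask->range take part in hashing
 * and comparison.
 */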
Paul Blakey05cd2712018-04-30 14:28:30 +03001467static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001468{
Paul Blakey05cd2712018-04-30 14:28:30 +03001469 mask->filter_ht_params = fl_ht_params;
1470 mask->filter_ht_params.key_len = fl_mask_range(mask);
1471 mask->filter_ht_params.key_offset += mask->range.start;
Jiri Pirko77b99002015-05-12 14:56:21 +02001472
Paul Blakey05cd2712018-04-30 14:28:30 +03001473 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
Jiri Pirko77b99002015-05-12 14:56:21 +02001474}
1475
1476#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
zhong jiangcb205a82018-09-19 19:32:11 +08001477#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
Jiri Pirko77b99002015-05-12 14:56:21 +02001478
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001479#define FL_KEY_IS_MASKED(mask, member) \
1480 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1481 0, FL_KEY_MEMBER_SIZE(member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001482
1483#define FL_KEY_SET(keys, cnt, id, member) \
1484 do { \
1485 keys[cnt].key_id = id; \
1486 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1487 cnt++; \
 1488	} while (0)
1489
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001490#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001491 do { \
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001492 if (FL_KEY_IS_MASKED(mask, member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001493 FL_KEY_SET(keys, cnt, id, member); \
 1494	} while (0)
1495
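/* fl_init_dissector() builds the flow dissector used on the software fast
 * path from the mask alone: FLOW_DISSECTOR_KEY_CONTROL and _BASIC are always
 * included, the remaining keys only when the corresponding mask members have
 * at least one non-zero byte (FL_KEY_SET_IF_MASKED).
 */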
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001496static void fl_init_dissector(struct flow_dissector *dissector,
1497 struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001498{
1499 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1500 size_t cnt = 0;
1501
Jiri Pirko8212ed72019-06-19 09:41:03 +03001502 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1503 FLOW_DISSECTOR_KEY_META, meta);
Tom Herbert42aecaa2015-06-04 09:16:39 -07001504 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
Jiri Pirko77b99002015-05-12 14:56:21 +02001505 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001506 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001507 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001508 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001509 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001510 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001511 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
Amritha Nambiar5c722992018-11-12 16:15:55 -08001512 if (FL_KEY_IS_MASKED(mask, tp) ||
1513 FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1514 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001515 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001516 FLOW_DISSECTOR_KEY_IP, ip);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001517 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001518 FLOW_DISSECTOR_KEY_TCP, tcp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001519 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman7b684882016-12-07 13:48:28 +01001520 FLOW_DISSECTOR_KEY_ICMP, icmp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001521 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman99d31322017-01-11 14:05:43 +01001522 FLOW_DISSECTOR_KEY_ARP, arp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001523 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001524 FLOW_DISSECTOR_KEY_MPLS, mpls);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001525 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001526 FLOW_DISSECTOR_KEY_VLAN, vlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001527 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jianbo Liud64efd02018-07-06 05:38:16 +00001528 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001529 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001530 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001531 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001532 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001533 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001534 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001535 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1536 FL_KEY_IS_MASKED(mask, enc_ipv6))
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001537 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1538 enc_control);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001539 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001540 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001541 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001542 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001543 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1544 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
Paul Blakeye0ace682019-07-09 10:30:50 +03001545 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1546 FLOW_DISSECTOR_KEY_CT, ct);
Jiri Pirko77b99002015-05-12 14:56:21 +02001547
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001548 skb_flow_dissector_init(dissector, keys, cnt);
Paul Blakey05cd2712018-04-30 14:28:30 +03001549}
1550
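/* fl_create_new_mask() turns the temporary node the caller inserted into
 * head->ht into a fully initialised mask: a private copy with its own filter
 * hashtable, dissector and a refcount of 1, which replaces the temporary
 * node and is linked onto head->masks.
 */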
1551static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1552 struct fl_flow_mask *mask)
1553{
1554 struct fl_flow_mask *newmask;
1555 int err;
1556
1557 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1558 if (!newmask)
1559 return ERR_PTR(-ENOMEM);
1560
1561 fl_mask_copy(newmask, mask);
1562
Amritha Nambiar5c722992018-11-12 16:15:55 -08001563 if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1564 (newmask->key.tp_min.src && newmask->key.tp_max.src))
1565 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1566
Paul Blakey05cd2712018-04-30 14:28:30 +03001567 err = fl_init_mask_hashtable(newmask);
1568 if (err)
1569 goto errout_free;
1570
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001571 fl_init_dissector(&newmask->dissector, &newmask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +03001572
1573 INIT_LIST_HEAD_RCU(&newmask->filters);
1574
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001575 refcount_set(&newmask->refcnt, 1);
Vlad Buslov195c2342019-03-21 15:17:38 +02001576 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1577 &newmask->ht_node, mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001578 if (err)
1579 goto errout_destroy;
1580
Vlad Buslov259e60f2019-03-21 15:17:39 +02001581 spin_lock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001582 list_add_tail_rcu(&newmask->list, &head->masks);
Vlad Buslov259e60f2019-03-21 15:17:39 +02001583 spin_unlock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001584
1585 return newmask;
1586
1587errout_destroy:
1588 rhashtable_destroy(&newmask->ht);
1589errout_free:
1590 kfree(newmask);
1591
1592 return ERR_PTR(err);
Jiri Pirko77b99002015-05-12 14:56:21 +02001593}
1594
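/* fl_check_assign_mask() makes fnew share an existing mask with the same key
 * when one is already present (taking a reference on it) and otherwise
 * creates a new one via fl_create_new_mask(). It returns -EAGAIN when the
 * matching mask is concurrently being deleted and -EINVAL when an overwrite
 * would change the mask of an existing filter.
 */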
1595static int fl_check_assign_mask(struct cls_fl_head *head,
Paul Blakey05cd2712018-04-30 14:28:30 +03001596 struct cls_fl_filter *fnew,
1597 struct cls_fl_filter *fold,
Jiri Pirko77b99002015-05-12 14:56:21 +02001598 struct fl_flow_mask *mask)
1599{
Paul Blakey05cd2712018-04-30 14:28:30 +03001600 struct fl_flow_mask *newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001601 int ret = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001602
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001603 rcu_read_lock();
Vlad Buslov195c2342019-03-21 15:17:38 +02001604
1605 /* Insert mask as temporary node to prevent concurrent creation of mask
1606 * with same key. Any concurrent lookups with same key will return
Vlad Buslov99815f52019-06-13 17:54:04 +03001607 * -EAGAIN because mask's refcnt is zero.
Vlad Buslov195c2342019-03-21 15:17:38 +02001608 */
1609 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1610 &mask->ht_node,
1611 mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001612 if (!fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001613 rcu_read_unlock();
1614
Vlad Buslov195c2342019-03-21 15:17:38 +02001615 if (fold) {
1616 ret = -EINVAL;
1617 goto errout_cleanup;
1618 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001619
1620 newmask = fl_create_new_mask(head, mask);
Vlad Buslov195c2342019-03-21 15:17:38 +02001621 if (IS_ERR(newmask)) {
1622 ret = PTR_ERR(newmask);
1623 goto errout_cleanup;
1624 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001625
1626 fnew->mask = newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001627 return 0;
Vlad Buslov195c2342019-03-21 15:17:38 +02001628 } else if (IS_ERR(fnew->mask)) {
1629 ret = PTR_ERR(fnew->mask);
Paul Blakeyf6521c52018-06-03 10:06:14 +03001630 } else if (fold && fold->mask != fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001631 ret = -EINVAL;
1632 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1633 /* Mask was deleted concurrently, try again */
1634 ret = -EAGAIN;
Jiri Pirko77b99002015-05-12 14:56:21 +02001635 }
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001636 rcu_read_unlock();
1637 return ret;
Vlad Buslov195c2342019-03-21 15:17:38 +02001638
1639errout_cleanup:
1640 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1641 mask_ht_params);
Vlad Buslov195c2342019-03-21 15:17:38 +02001642 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001643}
1644
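/* fl_set_parms() validates the actions, binds the class if TCA_FLOWER_CLASSID
 * is present (taking the rtnl lock only when the caller does not already hold
 * it), parses the key/mask via fl_set_key() and finally checks the resulting
 * mask against the chain template, if one is attached.
 */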
1645static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1646 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1647 unsigned long base, struct nlattr **tb,
Alexander Aring50a56192018-01-18 11:20:52 -05001648 struct nlattr *est, bool ovr,
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001649 struct fl_flow_tmplt *tmplt, bool rtnl_held,
Alexander Aring50a56192018-01-18 11:20:52 -05001650 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001651{
Jiri Pirko77b99002015-05-12 14:56:21 +02001652 int err;
1653
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001654 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
Vlad Buslovec6743a2019-02-11 10:55:43 +02001655 extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001656 if (err < 0)
1657 return err;
1658
1659 if (tb[TCA_FLOWER_CLASSID]) {
1660 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001661 if (!rtnl_held)
1662 rtnl_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001663 tcf_bind_filter(tp, &f->res, base);
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001664 if (!rtnl_held)
1665 rtnl_unlock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001666 }
1667
Alexander Aring1057c552018-01-18 11:20:54 -05001668 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001669 if (err)
Jiri Pirko45507522017-08-04 14:29:06 +02001670 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001671
1672 fl_mask_update_range(mask);
1673 fl_set_masked_key(&f->mkey, &f->key, mask);
1674
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001675 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1676 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1677 return -EINVAL;
1678 }
1679
Jiri Pirko77b99002015-05-12 14:56:21 +02001680 return 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001681}
1682
Vlad Buslov1f17f772019-04-05 20:56:26 +03001683static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1684 struct cls_fl_filter *fold,
1685 bool *in_ht)
1686{
1687 struct fl_flow_mask *mask = fnew->mask;
1688 int err;
1689
Vlad Buslov9e355522019-04-11 19:12:20 +03001690 err = rhashtable_lookup_insert_fast(&mask->ht,
1691 &fnew->ht_node,
1692 mask->filter_ht_params);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001693 if (err) {
1694 *in_ht = false;
1695 /* It is okay if filter with same key exists when
1696 * overwriting.
1697 */
1698 return fold && err == -EEXIST ? 0 : err;
1699 }
1700
1701 *in_ht = true;
1702 return 0;
1703}
1704
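/* fl_change() is the ->change() callback: it builds a new filter from the
 * netlink request, assigns (or creates) its mask, inserts it into the mask's
 * hashtable, offloads it to hardware unless skip_hw is set and then, under
 * tp->lock, either replaces an existing filter (fold) or allocates a new
 * handle in head->handle_idr. Concurrent deletion of tp, fold or the mask is
 * reported as -EAGAIN so that cls_api retries the request.
 */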
Jiri Pirko77b99002015-05-12 14:56:21 +02001705static int fl_change(struct net *net, struct sk_buff *in_skb,
1706 struct tcf_proto *tp, unsigned long base,
1707 u32 handle, struct nlattr **tca,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001708 void **arg, bool ovr, bool rtnl_held,
1709 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001710{
Vlad Buslove4746192019-03-21 15:17:33 +02001711 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07001712 struct cls_fl_filter *fold = *arg;
Jiri Pirko77b99002015-05-12 14:56:21 +02001713 struct cls_fl_filter *fnew;
Ivan Vecera2cddd202019-01-16 16:53:52 +01001714 struct fl_flow_mask *mask;
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001715 struct nlattr **tb;
Vlad Buslov1f17f772019-04-05 20:56:26 +03001716 bool in_ht;
Jiri Pirko77b99002015-05-12 14:56:21 +02001717 int err;
1718
Vlad Buslov06177552019-03-21 15:17:35 +02001719 if (!tca[TCA_OPTIONS]) {
1720 err = -EINVAL;
1721 goto errout_fold;
1722 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001723
Ivan Vecera2cddd202019-01-16 16:53:52 +01001724 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
Vlad Buslov06177552019-03-21 15:17:35 +02001725 if (!mask) {
1726 err = -ENOBUFS;
1727 goto errout_fold;
1728 }
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001729
Ivan Vecera2cddd202019-01-16 16:53:52 +01001730 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1731 if (!tb) {
1732 err = -ENOBUFS;
1733 goto errout_mask_alloc;
1734 }
1735
Johannes Berg8cb08172019-04-26 14:07:28 +02001736 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1737 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001738 if (err < 0)
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001739 goto errout_tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02001740
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001741 if (fold && handle && fold->handle != handle) {
1742 err = -EINVAL;
1743 goto errout_tb;
1744 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001745
1746 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001747 if (!fnew) {
1748 err = -ENOBUFS;
1749 goto errout_tb;
1750 }
Vlad Buslovc049d562019-04-24 09:53:31 +03001751 INIT_LIST_HEAD(&fnew->hw_list);
Vlad Buslov06177552019-03-21 15:17:35 +02001752 refcount_set(&fnew->refcnt, 1);
Jiri Pirko77b99002015-05-12 14:56:21 +02001753
Cong Wang14215102019-02-20 21:37:42 -08001754 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
WANG Congb9a24bb2016-08-19 12:36:54 -07001755 if (err < 0)
1756 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +02001757
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001758 if (tb[TCA_FLOWER_FLAGS]) {
1759 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1760
1761 if (!tc_flags_valid(fnew->flags)) {
1762 err = -EINVAL;
1763 goto errout;
1764 }
1765 }
1766
1767 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001768 tp->chain->tmplt_priv, rtnl_held, extack);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001769 if (err)
1770 goto errout;
1771
1772 err = fl_check_assign_mask(head, fnew, fold, mask);
1773 if (err)
1774 goto errout;
1775
Vlad Buslov1f17f772019-04-05 20:56:26 +03001776 err = fl_ht_insert_unique(fnew, fold, &in_ht);
1777 if (err)
1778 goto errout_mask;
1779
Hadar Hen Zion79685212016-12-01 14:06:34 +02001780 if (!tc_skip_hw(fnew->flags)) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001781 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
Hadar Hen Zion79685212016-12-01 14:06:34 +02001782 if (err)
Vlad Buslov1f17f772019-04-05 20:56:26 +03001783 goto errout_ht;
Hadar Hen Zion79685212016-12-01 14:06:34 +02001784 }
Amir Vadai5b33f482016-03-08 12:42:29 +02001785
Or Gerlitz55593962017-02-16 10:31:13 +02001786 if (!tc_in_hw(fnew->flags))
1787 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1788
Vlad Buslov3d81e712019-03-21 15:17:42 +02001789 spin_lock(&tp->lock);
1790
Vlad Buslov272ffaa2019-03-21 15:17:41 +02001791	/* tp was deleted concurrently. -EAGAIN will cause the caller to look up
 1792	 * the proto again or create a new one, if necessary.
1793 */
1794 if (tp->deleting) {
1795 err = -EAGAIN;
1796 goto errout_hw;
1797 }
1798
Amir Vadai5b33f482016-03-08 12:42:29 +02001799 if (fold) {
Vlad Buslovb2552b82019-03-21 15:17:36 +02001800 /* Fold filter was deleted concurrently. Retry lookup. */
1801 if (fold->deleted) {
1802 err = -EAGAIN;
1803 goto errout_hw;
1804 }
1805
Vlad Buslov620da482019-03-21 15:17:34 +02001806 fnew->handle = handle;
1807
Vlad Buslov1f17f772019-04-05 20:56:26 +03001808 if (!in_ht) {
1809 struct rhashtable_params params =
1810 fnew->mask->filter_ht_params;
1811
1812 err = rhashtable_insert_fast(&fnew->mask->ht,
1813 &fnew->ht_node,
1814 params);
1815 if (err)
1816 goto errout_hw;
1817 in_ht = true;
1818 }
Vlad Buslov620da482019-03-21 15:17:34 +02001819
Vlad Buslovc049d562019-04-24 09:53:31 +03001820 refcount_inc(&fnew->refcnt);
Roi Dayan599d2572018-12-19 18:07:56 +02001821 rhashtable_remove_fast(&fold->mask->ht,
1822 &fold->ht_node,
1823 fold->mask->filter_ht_params);
Matthew Wilcox234a4622017-11-28 09:56:36 -05001824 idr_replace(&head->handle_idr, fnew, fnew->handle);
Daniel Borkmannff3532f2015-07-17 22:38:44 +02001825 list_replace_rcu(&fold->list, &fnew->list);
Vlad Buslovb2552b82019-03-21 15:17:36 +02001826 fold->deleted = true;
Vlad Buslov620da482019-03-21 15:17:34 +02001827
Vlad Buslov3d81e712019-03-21 15:17:42 +02001828 spin_unlock(&tp->lock);
1829
Vlad Buslov99946772019-04-12 00:54:19 +03001830 fl_mask_put(head, fold->mask);
Vlad Buslov620da482019-03-21 15:17:34 +02001831 if (!tc_skip_hw(fold->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001832 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001833 tcf_unbind_filter(tp, &fold->res);
Vlad Buslov06177552019-03-21 15:17:35 +02001834 /* Caller holds reference to fold, so refcnt is always > 0
1835 * after this.
1836 */
1837 refcount_dec(&fold->refcnt);
1838 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02001839 } else {
Vlad Buslov620da482019-03-21 15:17:34 +02001840 if (handle) {
1841 /* user specifies a handle and it doesn't exist */
1842 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1843 handle, GFP_ATOMIC);
Vlad Buslov9a2d9382019-03-21 15:17:40 +02001844
1845 /* Filter with specified handle was concurrently
1846 * inserted after initial check in cls_api. This is not
1847 * necessarily an error if NLM_F_EXCL is not set in
1848 * message flags. Returning EAGAIN will cause cls_api to
1849 * try to update concurrently inserted rule.
1850 */
1851 if (err == -ENOSPC)
1852 err = -EAGAIN;
Vlad Buslov620da482019-03-21 15:17:34 +02001853 } else {
1854 handle = 1;
1855 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1856 INT_MAX, GFP_ATOMIC);
1857 }
1858 if (err)
1859 goto errout_hw;
1860
Vlad Buslovc049d562019-04-24 09:53:31 +03001861 refcount_inc(&fnew->refcnt);
Vlad Buslov620da482019-03-21 15:17:34 +02001862 fnew->handle = handle;
Paul Blakey05cd2712018-04-30 14:28:30 +03001863 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
Vlad Buslov3d81e712019-03-21 15:17:42 +02001864 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02001865 }
1866
Vlad Buslov620da482019-03-21 15:17:34 +02001867 *arg = fnew;
1868
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001869 kfree(tb);
Vlad Buslov99815f52019-06-13 17:54:04 +03001870 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
Jiri Pirko77b99002015-05-12 14:56:21 +02001871 return 0;
1872
Vlad Buslovc049d562019-04-24 09:53:31 +03001873errout_ht:
1874 spin_lock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02001875errout_hw:
Vlad Buslovc049d562019-04-24 09:53:31 +03001876 fnew->deleted = true;
Vlad Buslov3d81e712019-03-21 15:17:42 +02001877 spin_unlock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02001878 if (!tc_skip_hw(fnew->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001879 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001880 if (in_ht)
1881 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1882 fnew->mask->filter_ht_params);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001883errout_mask:
Vlad Buslov99946772019-04-12 00:54:19 +03001884 fl_mask_put(head, fnew->mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02001885errout:
Vlad Buslovc049d562019-04-24 09:53:31 +03001886 __fl_put(fnew);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001887errout_tb:
1888 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01001889errout_mask_alloc:
Vlad Buslov99815f52019-06-13 17:54:04 +03001890 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
Vlad Buslov06177552019-03-21 15:17:35 +02001891errout_fold:
1892 if (fold)
1893 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02001894 return err;
1895}
1896
Alexander Aring571acf22018-01-18 11:20:53 -05001897static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001898 bool rtnl_held, struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001899{
Vlad Buslove4746192019-03-21 15:17:33 +02001900 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07001901 struct cls_fl_filter *f = arg;
Vlad Buslovb2552b82019-03-21 15:17:36 +02001902 bool last_on_mask;
1903 int err = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001904
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001905 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
Paul Blakey05cd2712018-04-30 14:28:30 +03001906 *last = list_empty(&head->masks);
Vlad Buslov06177552019-03-21 15:17:35 +02001907 __fl_put(f);
1908
Vlad Buslovb2552b82019-03-21 15:17:36 +02001909 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001910}
1911
Vlad Buslov12db03b2019-02-11 10:55:45 +02001912static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1913 bool rtnl_held)
Jiri Pirko77b99002015-05-12 14:56:21 +02001914{
Cong Wangd39d7142019-06-28 11:03:42 -07001915 struct cls_fl_head *head = fl_head_dereference(tp);
1916 unsigned long id = arg->cookie, tmp;
Jiri Pirko77b99002015-05-12 14:56:21 +02001917 struct cls_fl_filter *f;
1918
Vlad Buslov01683a12018-07-09 13:29:11 +03001919 arg->count = arg->skip;
1920
Cong Wangd39d7142019-06-28 11:03:42 -07001921 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1922 /* don't return filters that are being deleted */
1923 if (!refcount_inc_not_zero(&f->refcnt))
1924 continue;
Vlad Buslov01683a12018-07-09 13:29:11 +03001925 if (arg->fn(tp, f, arg) < 0) {
Vlad Buslov06177552019-03-21 15:17:35 +02001926 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03001927 arg->stop = 1;
1928 break;
Paul Blakey05cd2712018-04-30 14:28:30 +03001929 }
Vlad Buslov06177552019-03-21 15:17:35 +02001930 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03001931 arg->count++;
Jiri Pirko77b99002015-05-12 14:56:21 +02001932 }
Cong Wangd39d7142019-06-28 11:03:42 -07001933 arg->cookie = id;
Jiri Pirko77b99002015-05-12 14:56:21 +02001934}
1935
Vlad Buslovc049d562019-04-24 09:53:31 +03001936static struct cls_fl_filter *
1937fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1938{
1939 struct cls_fl_head *head = fl_head_dereference(tp);
1940
1941 spin_lock(&tp->lock);
1942 if (list_empty(&head->hw_filters)) {
1943 spin_unlock(&tp->lock);
1944 return NULL;
1945 }
1946
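	/* When no cursor is given, start from the list head itself; it is only
	 * used as the starting point for list_for_each_entry_continue() below
	 * and is never dereferenced as a real filter.
	 */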
1947 if (!f)
1948 f = list_entry(&head->hw_filters, struct cls_fl_filter,
1949 hw_list);
1950 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1951 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1952 spin_unlock(&tp->lock);
1953 return f;
1954 }
1955 }
1956
1957 spin_unlock(&tp->lock);
1958 return NULL;
1959}
1960
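/* fl_reoffload() walks the hw_filters list and replays each filter to a
 * single block callback (cb), as FLOW_CLS_REPLACE when adding or
 * FLOW_CLS_DESTROY when removing. A filter whose actions cannot be set up is
 * skipped unless it is marked skip_sw, in which case the error is returned.
 */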
Pablo Neira Ayusoa7323312019-07-19 18:20:15 +02001961static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
John Hurley31533cb2018-06-25 14:30:06 -07001962 void *cb_priv, struct netlink_ext_ack *extack)
1963{
John Hurley31533cb2018-06-25 14:30:06 -07001964 struct tcf_block *block = tp->chain->block;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02001965 struct flow_cls_offload cls_flower = {};
Vlad Buslovc049d562019-04-24 09:53:31 +03001966 struct cls_fl_filter *f = NULL;
John Hurley31533cb2018-06-25 14:30:06 -07001967 int err;
1968
Vlad Buslovc049d562019-04-24 09:53:31 +03001969 /* hw_filters list can only be changed by hw offload functions after
1970 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1971 * iterating it.
1972 */
1973 ASSERT_RTNL();
John Hurley31533cb2018-06-25 14:30:06 -07001974
Vlad Buslovc049d562019-04-24 09:53:31 +03001975 while ((f = fl_get_next_hw_filter(tp, f, add))) {
John Hurley95e27a42019-04-02 23:53:20 +01001976 cls_flower.rule =
1977 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1978 if (!cls_flower.rule) {
1979 __fl_put(f);
1980 return -ENOMEM;
John Hurley31533cb2018-06-25 14:30:06 -07001981 }
John Hurley95e27a42019-04-02 23:53:20 +01001982
1983 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
Pieter Jansen van Vuurend6787142019-05-06 17:24:21 -07001984 extack);
John Hurley95e27a42019-04-02 23:53:20 +01001985 cls_flower.command = add ?
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02001986 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
John Hurley95e27a42019-04-02 23:53:20 +01001987 cls_flower.cookie = (unsigned long)f;
1988 cls_flower.rule->match.dissector = &f->mask->dissector;
1989 cls_flower.rule->match.mask = &f->mask->key;
1990 cls_flower.rule->match.key = &f->mkey;
1991
Vlad Buslov9838b202019-08-26 16:45:03 +03001992 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
1993 true);
John Hurley95e27a42019-04-02 23:53:20 +01001994 if (err) {
1995 kfree(cls_flower.rule);
1996 if (tc_skip_sw(f->flags)) {
1997 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1998 __fl_put(f);
1999 return err;
2000 }
2001 goto next_flow;
2002 }
2003
2004 cls_flower.classid = f->res.classid;
2005
Vlad Buslov40119212019-08-26 16:44:59 +03002006 err = tc_setup_cb_reoffload(block, tp, add, cb,
2007 TC_SETUP_CLSFLOWER, &cls_flower,
2008 cb_priv, &f->flags,
2009 &f->in_hw_count);
Vlad Buslov5a6ff4b2019-08-26 16:45:04 +03002010 tc_cleanup_flow_action(&cls_flower.rule->action);
John Hurley95e27a42019-04-02 23:53:20 +01002011 kfree(cls_flower.rule);
2012
2013 if (err) {
Vlad Buslov40119212019-08-26 16:44:59 +03002014 __fl_put(f);
2015 return err;
John Hurley95e27a42019-04-02 23:53:20 +01002016 }
John Hurley95e27a42019-04-02 23:53:20 +01002017next_flow:
John Hurley95e27a42019-04-02 23:53:20 +01002018 __fl_put(f);
John Hurley31533cb2018-06-25 14:30:06 -07002019 }
2020
2021 return 0;
2022}
2023
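/* fl_hw_add()/fl_hw_del() maintain head->hw_filters, the list of filters that
 * have been handed to the hardware offload machinery; fl_reoffload() above
 * iterates exactly this list.
 */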
Vlad Buslova449a3e2019-08-26 16:45:00 +03002024static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2025{
2026 struct flow_cls_offload *cls_flower = type_data;
2027 struct cls_fl_filter *f =
2028 (struct cls_fl_filter *) cls_flower->cookie;
2029 struct cls_fl_head *head = fl_head_dereference(tp);
2030
2031 spin_lock(&tp->lock);
2032 list_add(&f->hw_list, &head->hw_filters);
2033 spin_unlock(&tp->lock);
2034}
2035
2036static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2037{
2038 struct flow_cls_offload *cls_flower = type_data;
2039 struct cls_fl_filter *f =
2040 (struct cls_fl_filter *) cls_flower->cookie;
2041
2042 spin_lock(&tp->lock);
2043 if (!list_empty(&f->hw_list))
2044 list_del_init(&f->hw_list);
2045 spin_unlock(&tp->lock);
2046}
2047
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002048static int fl_hw_create_tmplt(struct tcf_chain *chain,
2049 struct fl_flow_tmplt *tmplt)
Jiri Pirko34738452018-07-23 09:23:11 +02002050{
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002051 struct flow_cls_offload cls_flower = {};
Jiri Pirko34738452018-07-23 09:23:11 +02002052 struct tcf_block *block = chain->block;
Jiri Pirko34738452018-07-23 09:23:11 +02002053
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01002054 cls_flower.rule = flow_rule_alloc(0);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002055 if (!cls_flower.rule)
2056 return -ENOMEM;
2057
Jiri Pirko34738452018-07-23 09:23:11 +02002058 cls_flower.common.chain_index = chain->index;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002059 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
Jiri Pirko34738452018-07-23 09:23:11 +02002060 cls_flower.cookie = (unsigned long) tmplt;
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002061 cls_flower.rule->match.dissector = &tmplt->dissector;
2062 cls_flower.rule->match.mask = &tmplt->mask;
2063 cls_flower.rule->match.key = &tmplt->dummy_key;
Jiri Pirko34738452018-07-23 09:23:11 +02002064
2065 /* We don't care if driver (any of them) fails to handle this
2066 * call. It serves just as a hint for it.
2067 */
Vlad Buslov40119212019-08-26 16:44:59 +03002068 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002069 kfree(cls_flower.rule);
2070
2071 return 0;
Jiri Pirko34738452018-07-23 09:23:11 +02002072}
2073
2074static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2075 struct fl_flow_tmplt *tmplt)
2076{
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002077 struct flow_cls_offload cls_flower = {};
Jiri Pirko34738452018-07-23 09:23:11 +02002078 struct tcf_block *block = chain->block;
2079
2080 cls_flower.common.chain_index = chain->index;
Pablo Neira Ayusof9e30082019-07-09 22:55:49 +02002081 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
Jiri Pirko34738452018-07-23 09:23:11 +02002082 cls_flower.cookie = (unsigned long) tmplt;
2083
Vlad Buslov40119212019-08-26 16:44:59 +03002084 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
Jiri Pirko34738452018-07-23 09:23:11 +02002085}
2086
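/* Chain templates: fl_tmplt_create() parses a key/mask in the same attribute
 * format as a regular filter, builds a dissector for it and advertises it to
 * drivers as a hint. Filters later added on the chain must have masks that
 * fit the template (see the fl_mask_fits_tmplt() check in fl_set_parms()).
 */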
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002087static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2088 struct nlattr **tca,
2089 struct netlink_ext_ack *extack)
2090{
2091 struct fl_flow_tmplt *tmplt;
2092 struct nlattr **tb;
2093 int err;
2094
2095 if (!tca[TCA_OPTIONS])
2096 return ERR_PTR(-EINVAL);
2097
2098 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2099 if (!tb)
2100 return ERR_PTR(-ENOBUFS);
Johannes Berg8cb08172019-04-26 14:07:28 +02002101 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2102 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002103 if (err)
2104 goto errout_tb;
2105
2106 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03002107 if (!tmplt) {
2108 err = -ENOMEM;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002109 goto errout_tb;
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03002110 }
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002111 tmplt->chain = chain;
2112 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2113 if (err)
2114 goto errout_tmplt;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002115
2116 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2117
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002118 err = fl_hw_create_tmplt(chain, tmplt);
2119 if (err)
2120 goto errout_tmplt;
Jiri Pirko34738452018-07-23 09:23:11 +02002121
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01002122 kfree(tb);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002123 return tmplt;
2124
2125errout_tmplt:
2126 kfree(tmplt);
2127errout_tb:
2128 kfree(tb);
2129 return ERR_PTR(err);
2130}
2131
2132static void fl_tmplt_destroy(void *tmplt_priv)
2133{
2134 struct fl_flow_tmplt *tmplt = tmplt_priv;
2135
Cong Wang95278dd2018-10-02 12:50:19 -07002136 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2137 kfree(tmplt);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002138}
2139
Jiri Pirko77b99002015-05-12 14:56:21 +02002140static int fl_dump_key_val(struct sk_buff *skb,
2141 void *val, int val_type,
2142 void *mask, int mask_type, int len)
2143{
2144 int err;
2145
2146 if (!memchr_inv(mask, 0, len))
2147 return 0;
2148 err = nla_put(skb, val_type, len, val);
2149 if (err)
2150 return err;
2151 if (mask_type != TCA_FLOWER_UNSPEC) {
2152 err = nla_put(skb, mask_type, len, mask);
2153 if (err)
2154 return err;
2155 }
2156 return 0;
2157}
2158
Amritha Nambiar5c722992018-11-12 16:15:55 -08002159static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2160 struct fl_flow_key *mask)
2161{
2162 if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
2163 &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
2164 sizeof(key->tp_min.dst)) ||
2165 fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
2166 &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
2167 sizeof(key->tp_max.dst)) ||
2168 fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
2169 &mask->tp_min.src, TCA_FLOWER_UNSPEC,
2170 sizeof(key->tp_min.src)) ||
2171 fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
2172 &mask->tp_max.src, TCA_FLOWER_UNSPEC,
2173 sizeof(key->tp_max.src)))
2174 return -1;
2175
2176 return 0;
2177}
2178
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002179static int fl_dump_key_mpls(struct sk_buff *skb,
2180 struct flow_dissector_key_mpls *mpls_key,
2181 struct flow_dissector_key_mpls *mpls_mask)
2182{
2183 int err;
2184
2185 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2186 return 0;
2187 if (mpls_mask->mpls_ttl) {
2188 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2189 mpls_key->mpls_ttl);
2190 if (err)
2191 return err;
2192 }
2193 if (mpls_mask->mpls_tc) {
2194 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2195 mpls_key->mpls_tc);
2196 if (err)
2197 return err;
2198 }
2199 if (mpls_mask->mpls_label) {
2200 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2201 mpls_key->mpls_label);
2202 if (err)
2203 return err;
2204 }
2205 if (mpls_mask->mpls_bos) {
2206 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2207 mpls_key->mpls_bos);
2208 if (err)
2209 return err;
2210 }
2211 return 0;
2212}
2213
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002214static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002215 struct flow_dissector_key_ip *key,
2216 struct flow_dissector_key_ip *mask)
2217{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002218 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2219 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2220 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2221 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2222
2223 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2224 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002225 return -1;
2226
2227 return 0;
2228}
2229
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002230static int fl_dump_key_vlan(struct sk_buff *skb,
Jianbo Liud64efd02018-07-06 05:38:16 +00002231 int vlan_id_key, int vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002232 struct flow_dissector_key_vlan *vlan_key,
2233 struct flow_dissector_key_vlan *vlan_mask)
2234{
2235 int err;
2236
2237 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2238 return 0;
2239 if (vlan_mask->vlan_id) {
Jianbo Liud64efd02018-07-06 05:38:16 +00002240 err = nla_put_u16(skb, vlan_id_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002241 vlan_key->vlan_id);
2242 if (err)
2243 return err;
2244 }
2245 if (vlan_mask->vlan_priority) {
Jianbo Liud64efd02018-07-06 05:38:16 +00002246 err = nla_put_u8(skb, vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002247 vlan_key->vlan_priority);
2248 if (err)
2249 return err;
2250 }
2251 return 0;
2252}
2253
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002254static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2255 u32 *flower_key, u32 *flower_mask,
2256 u32 flower_flag_bit, u32 dissector_flag_bit)
2257{
2258 if (dissector_mask & dissector_flag_bit) {
2259 *flower_mask |= flower_flag_bit;
2260 if (dissector_key & dissector_flag_bit)
2261 *flower_key |= flower_flag_bit;
2262 }
2263}
2264
2265static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2266{
2267 u32 key, mask;
2268 __be32 _key, _mask;
2269 int err;
2270
2271 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2272 return 0;
2273
2274 key = 0;
2275 mask = 0;
2276
2277 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2278 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Pieter Jansen van Vuuren459d1532018-03-06 18:11:14 +01002279 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2280 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2281 FLOW_DIS_FIRST_FRAG);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002282
2283 _key = cpu_to_be32(key);
2284 _mask = cpu_to_be32(mask);
2285
2286 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2287 if (err)
2288 return err;
2289
2290 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2291}
2292
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002293static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2294 struct flow_dissector_key_enc_opts *enc_opts)
2295{
2296 struct geneve_opt *opt;
2297 struct nlattr *nest;
2298 int opt_off = 0;
2299
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002300 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002301 if (!nest)
2302 goto nla_put_failure;
2303
2304 while (enc_opts->len > opt_off) {
2305 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2306
2307 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2308 opt->opt_class))
2309 goto nla_put_failure;
2310 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2311 opt->type))
2312 goto nla_put_failure;
2313 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2314 opt->length * 4, opt->opt_data))
2315 goto nla_put_failure;
2316
2317 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2318 }
2319 nla_nest_end(skb, nest);
2320 return 0;
2321
2322nla_put_failure:
2323 nla_nest_cancel(skb, nest);
2324 return -EMSGSIZE;
2325}
2326
Xin Longd8f9dfa2019-11-21 18:03:28 +08002327static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2328 struct flow_dissector_key_enc_opts *enc_opts)
2329{
2330 struct vxlan_metadata *md;
2331 struct nlattr *nest;
2332
2333 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2334 if (!nest)
2335 goto nla_put_failure;
2336
2337 md = (struct vxlan_metadata *)&enc_opts->data[0];
2338 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2339 goto nla_put_failure;
2340
2341 nla_nest_end(skb, nest);
2342 return 0;
2343
2344nla_put_failure:
2345 nla_nest_cancel(skb, nest);
2346 return -EMSGSIZE;
2347}
2348
Xin Long79b10112019-11-21 18:03:29 +08002349static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2350 struct flow_dissector_key_enc_opts *enc_opts)
2351{
2352 struct erspan_metadata *md;
2353 struct nlattr *nest;
2354
2355 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2356 if (!nest)
2357 goto nla_put_failure;
2358
2359 md = (struct erspan_metadata *)&enc_opts->data[0];
2360 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2361 goto nla_put_failure;
2362
2363 if (md->version == 1 &&
2364 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2365 goto nla_put_failure;
2366
2367 if (md->version == 2 &&
2368 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2369 md->u.md2.dir) ||
2370 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2371 get_hwid(&md->u.md2))))
2372 goto nla_put_failure;
2373
2374 nla_nest_end(skb, nest);
2375 return 0;
2376
2377nla_put_failure:
2378 nla_nest_cancel(skb, nest);
2379 return -EMSGSIZE;
2380}
2381
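/* Conntrack state, zone, mark and labels are dumped only when the respective
 * CONFIG_NF_CONNTRACK* options are enabled, mirroring the IS_ENABLED()
 * checks performed when the keys were parsed.
 */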
Paul Blakeye0ace682019-07-09 10:30:50 +03002382static int fl_dump_key_ct(struct sk_buff *skb,
2383 struct flow_dissector_key_ct *key,
2384 struct flow_dissector_key_ct *mask)
2385{
2386 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2387 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2388 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2389 sizeof(key->ct_state)))
2390 goto nla_put_failure;
2391
2392 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2393 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2394 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2395 sizeof(key->ct_zone)))
2396 goto nla_put_failure;
2397
2398 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2399 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2400 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2401 sizeof(key->ct_mark)))
2402 goto nla_put_failure;
2403
2404 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2405 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2406 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2407 sizeof(key->ct_labels)))
2408 goto nla_put_failure;
2409
2410 return 0;
2411
2412nla_put_failure:
2413 return -EMSGSIZE;
2414}
2415
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002416static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2417 struct flow_dissector_key_enc_opts *enc_opts)
2418{
2419 struct nlattr *nest;
2420 int err;
2421
2422 if (!enc_opts->len)
2423 return 0;
2424
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002425 nest = nla_nest_start_noflag(skb, enc_opt_type);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002426 if (!nest)
2427 goto nla_put_failure;
2428
2429 switch (enc_opts->dst_opt_type) {
2430 case TUNNEL_GENEVE_OPT:
2431 err = fl_dump_key_geneve_opt(skb, enc_opts);
2432 if (err)
2433 goto nla_put_failure;
2434 break;
Xin Longd8f9dfa2019-11-21 18:03:28 +08002435 case TUNNEL_VXLAN_OPT:
2436 err = fl_dump_key_vxlan_opt(skb, enc_opts);
2437 if (err)
2438 goto nla_put_failure;
2439 break;
Xin Long79b10112019-11-21 18:03:29 +08002440 case TUNNEL_ERSPAN_OPT:
2441 err = fl_dump_key_erspan_opt(skb, enc_opts);
2442 if (err)
2443 goto nla_put_failure;
2444 break;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002445 default:
2446 goto nla_put_failure;
2447 }
2448 nla_nest_end(skb, nest);
2449 return 0;
2450
2451nla_put_failure:
2452 nla_nest_cancel(skb, nest);
2453 return -EMSGSIZE;
2454}
2455
2456static int fl_dump_key_enc_opt(struct sk_buff *skb,
2457 struct flow_dissector_key_enc_opts *key_opts,
2458 struct flow_dissector_key_enc_opts *msk_opts)
2459{
2460 int err;
2461
2462 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2463 if (err)
2464 return err;
2465
2466 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2467}
2468
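/* fl_dump_key() mirrors fl_set_key(): each attribute is emitted only when its
 * mask is non-zero, so a dump of an installed filter contains just the fields
 * the user originally matched on.
 */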
Jiri Pirkof5749082018-07-23 09:23:08 +02002469static int fl_dump_key(struct sk_buff *skb, struct net *net,
2470 struct fl_flow_key *key, struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02002471{
Jiri Pirko8212ed72019-06-19 09:41:03 +03002472 if (mask->meta.ingress_ifindex) {
Jiri Pirko77b99002015-05-12 14:56:21 +02002473 struct net_device *dev;
2474
Jiri Pirko8212ed72019-06-19 09:41:03 +03002475 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
Jiri Pirko77b99002015-05-12 14:56:21 +02002476 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2477 goto nla_put_failure;
2478 }
2479
2480 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2481 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2482 sizeof(key->eth.dst)) ||
2483 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2484 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2485 sizeof(key->eth.src)) ||
2486 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2487 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2488 sizeof(key->basic.n_proto)))
2489 goto nla_put_failure;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002490
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002491 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2492 goto nla_put_failure;
2493
Jianbo Liud64efd02018-07-06 05:38:16 +00002494 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2495 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002496 goto nla_put_failure;
2497
Jianbo Liud64efd02018-07-06 05:38:16 +00002498 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2499 TCA_FLOWER_KEY_CVLAN_PRIO,
2500 &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			    sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			    sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
			    TCA_FLOWER_KEY_ENC_IPV6_DST,
			    &mask->enc_ipv6.dst,
			    TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			    sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

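/* Dump one flower filter to user space: serialize its classid, key/mask,
 * flags and actions as TCA_OPTIONS nested attributes.  The software state
 * is copied under tp->lock because the classifier also supports unlocked
 * (rtnl-less) updates; hardware counters are refreshed via
 * fl_hw_update_stats() before the action stats are dumped, unless the
 * filter was created with skip_hw.
 */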
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

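/* Dump a chain template: reuse fl_dump_key() on the template's dummy key
 * and mask so user space sees the same attribute layout as for a filter.
 */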
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

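/* Refresh the cached class reference in a filter's tcf_result when the
 * owning qdisc (un)binds the class that the filter's classid points to.
 */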
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

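/* Classifier registration.  TCF_PROTO_OPS_DOIT_UNLOCKED tells the cls API
 * that these callbacks may be invoked without the rtnl lock held; flower
 * relies on tp->lock, reference counting and RCU for its own
 * synchronization.
 */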
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");