// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

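/* Dissected packet fields used for matching. The struct is aligned and
 * implicitly padded to a multiple of the machine word size because the
 * masked lookup helpers below walk it as an array of longs.
 */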
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

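/* Compute the smallest long-aligned window [start, end) that covers all
 * non-zero bytes of the mask, so that hashing and masking only need to
 * touch that window instead of the full key.
 */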
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

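/* Produce the masked key used for hash table lookup by ANDing the
 * relevant window of the dissected key with the mask, one long at a time.
 */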
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

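/* A mask fits a template if it matches on a subset of the template's
 * bits: ~tmplt & mask is non-zero iff the mask enables a bit that the
 * template leaves unmatched.
 */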
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

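/* Port ranges cannot be expressed as a hashable mask, so they are checked
 * linearly: if the packet's port falls within the filter's [min, max]
 * range, the filter's own masked min/max values are copied into the
 * lookup key so the subsequent hash lookup can match.
 */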
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_max.dst);
	min_val = htons(filter->key.tp_min.dst);
	max_val = htons(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp.dst) < min_val ||
		    htons(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.src);
	max_mask = htons(filter->mask->key.tp_max.src);
	min_val = htons(filter->key.tp_min.src);
	max_val = htons(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp.src) < min_val ||
		    htons(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

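/* Classification walks every mask in use: the packet is dissected into
 * skb_key, masked into skb_mkey and looked up in the mask's hash table.
 * The first matching filter that is not skip_sw decides the verdict.
 */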
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_mask *mask;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

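/* Drop a reference to the mask. The last put unlinks the mask from the
 * head and frees it via the RCU workqueue; returns true in that case.
 */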
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	spin_lock(&tp->lock);
	list_del_init(&f->hw_list);
	tcf_block_offload_dec(block, &f->flags);
	spin_unlock(&tp->lock);

	if (!rtnl_held)
		rtnl_unlock();
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	if (!rtnl_held)
		rtnl_lock();

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule) {
		err = -ENOMEM;
		goto errout;
	}

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;
		goto errout;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	kfree(cls_flower.rule);

	if (err < 0) {
		fl_hw_destroy_filter(tp, f, true, NULL);
		goto errout;
	} else if (err > 0) {
		f->in_hw_count = err;
		err = 0;
		spin_lock(&tp->lock);
		tcf_block_offload_inc(block, &f->flags);
		spin_unlock(&tp->lock);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
		err = -EINVAL;
		goto errout;
	}

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
errout:
	if (!rtnl_held)
		rtnl_unlock();

	return err;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);

	if (!rtnl_held)
		rtnl_unlock();
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
						unsigned long *handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f;

	rcu_read_lock();
	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
		/* don't return filters that are being deleted */
		if (refcount_inc_not_zero(&f->refcnt))
			break;
		++(*handle);
	}
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV] = { .type = NLA_STRING,
			       .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

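/* Parse the min/max port attributes used for range matching and reject
 * degenerate ranges where max <= min in either direction.
 */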
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
	    (mask->tp_min.src && mask->tp_max.src &&
	     htons(key->tp_max.src) <= htons(key->tp_min.src)))
		return -EINVAL;

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

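/* Parse a single geneve option TLV into the key's option buffer. A depth
 * of zero means no mask attribute was supplied, in which case the 0xff
 * fill below stands, i.e. an exact match is assumed. Returns the number
 * of bytes consumed or a negative errno.
 */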
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

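/* Each mask gets its own filter hash table, keyed only on the mask's
 * non-zero window: key_offset is advanced to range.start and key_len is
 * set to the window size computed by fl_mask_update_range().
 */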
static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while (0)

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while (0)

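/* Build the flow_dissector used to extract keys from packets: the control
 * and basic keys are always extracted, every other key only when its mask
 * has at least one bit set.
 */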
static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_META, meta);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	if (FL_KEY_IS_MASKED(mask, tp) ||
	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);

	skb_flow_dissector_init(dissector, keys, cnt);
}

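/* Allocate a new shareable mask, hash it into head->ht (replacing the
 * temporary node inserted by fl_check_assign_mask()) and link it on
 * head->masks. Returns the new mask or an ERR_PTR().
 */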
static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}

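/* Find or create the mask for @fnew. Masks are deduplicated in head->ht so
 * filters with identical masks share one fl_flow_mask and its filter
 * hashtable. Returns 0 on success or -EAGAIN if the caller should retry.
 */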
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero.
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	return ret;
}

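/* Validate actions and flower attributes for a new or updated filter and
 * fill in its key, masked key and exts. Takes rtnl around tcf_bind_filter()
 * only when the caller does not already hold it.
 */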
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (!rtnl_held)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (!rtnl_held)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}

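/* Insert the filter into its mask's hashtable. When overwriting an existing
 * filter (@fold set), a clashing masked key is expected and not an error;
 * @in_ht tells the caller whether the new filter actually went in.
 */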
static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}

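/* Create a new filter or replace @*arg. May run without rtnl when the
 * unlocked classifier API is used, so concurrent deletion of the proto,
 * the old filter or the mask is detected under tp->lock and reported as
 * -EAGAIN so cls_api can retry.
 *
 * Illustrative userspace usage (hypothetical interface name):
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       dst_ip 192.0.2.0/24 ip_proto tcp dst_port 80 action drop
 * Filters sharing a mask (/24 dst + exact tcp port here) end up sharing
 * a single fl_flow_mask and its hashtable.
 */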
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca[TCA_OPTIONS]) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, rtnl_held, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
	 * proto again or create new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		if (handle) {
			/* user specifies a handle and it doesn't exist */
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		} else {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    INT_MAX, GFP_ATOMIC);
		}
		if (err)
			goto errout_hw;

		refcount_inc(&fnew->refcnt);
		fnew->handle = handle;
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);
errout:
	__fl_put(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}

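/* Delete @arg from its mask and the handle IDR; @*last reports whether this
 * removed the last mask, and therefore the last filter, of the proto.
 */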
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			break;
		}
		__fl_put(f);
		arg->cookie++;
		arg->count++;
	}
}

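/* Walk head->hw_filters under tp->lock, starting after @f (or from the list
 * head when @f is NULL), and return the next filter we can take a reference
 * on. Filters already marked deleted are skipped when adding offloads.
 */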
static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}

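/* Replay all hardware-offloaded filters to a block callback, e.g. when a
 * device binds to or unbinds from the block. Failures are fatal only for
 * filters marked skip_sw, which cannot fall back to software.
 */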
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
		kfree(cls_flower.rule);

		if (err) {
			if (add && tc_skip_sw(f->flags)) {
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		spin_lock(&tp->lock);
		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
					  add);
		spin_unlock(&tp->lock);
next_flow:
		__fl_put(f);
	}

	return 0;
}

static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	kfree(cls_flower.rule);

	return 0;
}

static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
}

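/* Parse a chain template from netlink. The template's dummy key and mask
 * constrain the masks future filters on the chain may use (enforced by
 * fl_mask_fits_tmplt()) and are offered to drivers as a hint via
 * fl_hw_create_tmplt().
 */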
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}

static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}

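/* Dump one key/mask pair to netlink, but only when the mask is non-zero.
 * A mask_type of TCA_FLOWER_UNSPEC means no separate mask attribute is
 * emitted for this field.
 */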
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

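/* Dump the min/max source and destination ports of a port-range match;
 * range bounds are exact values, so no mask attributes are emitted.
 */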
static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.src)))
		return -1;

	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

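/* Dump every masked field of @key/@mask as flower netlink attributes.
 * Shared with template dumping, which passes the template's dummy key.
 */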
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

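/* Dump one filter to userspace. Fields that can change concurrently
 * (classid, key, flags) are read under tp->lock; hardware stats are
 * refreshed afterwards, outside the lock.
 */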
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");