// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

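/* Keys are compared one long word at a time, so fl_flow_mask_range
 * records the byte span of a mask that actually contains set bits;
 * hashing and matching then only touch that span instead of the whole
 * (large) fl_flow_key.
 */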
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference
	 * counter can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

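/* Find the first and last nonzero bytes of the mask and round the span
 * out to long-word boundaries; fl_flow_key is __aligned(BITS_PER_LONG / 8),
 * so the masked compare in fl_set_masked_key() can safely step in longs.
 */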
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

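/* Compute the masked lookup key for a packet: AND the dissected key
 * with the mask one long at a time, only across the in-use byte range.
 */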
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

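/* A mask fits a chain template when it does not match on any bit the
 * template mask leaves unset: (~template & mask) must be zero over the
 * whole range. A NULL template places no restriction.
 */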
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

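/* Port-range filters cannot be matched by hashing alone: the packet key
 * carries a single port value while the filter stores min/max bounds.
 * These helpers reject packets outside the range and then copy the
 * filter's masked min/max into the lookup key so the hash lookup in
 * __fl_lookup() can still succeed.
 */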
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_max.dst);
	min_val = htons(filter->key.tp_min.dst);
	max_val = htons(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp.dst) < min_val ||
		    htons(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.src);
	max_mask = htons(filter->mask->key.tp_max.src);
	min_val = htons(filter->key.tp_min.src);
	max_val = htons(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp.src) < min_val ||
		    htons(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

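/* Classification walks every mask registered on this classifier, masks
 * the dissected packet key, and does one hash lookup per mask. The
 * first matching software filter wins; skip-SW filters are ignored.
 */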
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_mask *mask;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_key.indev_ifindex = skb->skb_iif;
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask)
{
	WARN_ON(!list_empty(&mask->filters));
	rhashtable_destroy(&mask->ht);
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

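/* Hardware offload helpers. Flower itself can run without rtnl held,
 * but the offload callbacks still require it, so each helper takes the
 * lock when the caller does not already hold it. Driver state is
 * mirrored through tc_setup_cb_call() on the block's callback list.
 */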
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	spin_lock(&tp->lock);
	list_del_init(&f->hw_list);
	tcf_block_offload_dec(block, &f->flags);
	spin_unlock(&tp->lock);

	if (!rtnl_held)
		rtnl_unlock();
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	if (!rtnl_held)
		rtnl_lock();

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule) {
		err = -ENOMEM;
		goto errout;
	}

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;
		goto errout;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	kfree(cls_flower.rule);

	if (err < 0) {
		fl_hw_destroy_filter(tp, f, true, NULL);
		goto errout;
	} else if (err > 0) {
		f->in_hw_count = err;
		err = 0;
		spin_lock(&tp->lock);
		tcf_block_offload_inc(block, &f->flags);
		spin_unlock(&tp->lock);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
		err = -EINVAL;
		goto errout;
	}

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
errout:
	if (!rtnl_held)
		rtnl_unlock();

	return err;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);

	if (!rtnl_held)
		rtnl_unlock();
}

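/* Filter reference counting: lookups take a reference with
 * refcount_inc_not_zero(), so a filter whose refcount already hit zero
 * (one being freed) can never be revived. The final __fl_put() defers
 * freeing to an RCU work item while tcf_exts_get_net() succeeds.
 */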
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
						unsigned long *handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f;

	rcu_read_lock();
	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
		/* don't return filters that are being deleted */
		if (refcount_inc_not_zero(&f->refcnt))
			break;
		++(*handle);
	}
	rcu_read_unlock();

	return f;
}

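/* Unlink a filter from all data structures. The f->deleted flag, set
 * under tp->lock, makes concurrent delete attempts fail with -ENOENT;
 * *last reports whether this filter's mask was freed as well.
 */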
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

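/* Copy one key attribute and its mask. If the mask attribute is absent
 * (or the key has no mask attribute at all, mask_type == TCA_FLOWER_UNSPEC),
 * the mask defaults to all-ones, i.e. an exact match on the value.
 */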
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

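/* Parse min/max port attributes for range matching. A direction's range
 * is valid only if max is strictly greater than min in host byte order.
 */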
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
	    (mask->tp_min.src && mask->tp_max.src &&
	     htons(key->tp_max.src) <= htons(key->tp_min.src)))
		return -EINVAL;

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

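/* Parse a single geneve option from a key or mask attribute stream into
 * enc_opts. On the mask pass, option_len carries the length parsed for
 * the key; if no mask attribute was provided (depth == 0), the option
 * is left filled with all-ones, i.e. an exact match. Returns the option
 * length in bytes, or a negative error.
 */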
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

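/* Walk the TCA_FLOWER_KEY_ENC_OPTS key and mask attribute lists in
 * lockstep, parsing each option into key->enc_opts and mask->enc_opts
 * and verifying that key and mask end up with identical lengths.
 */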
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

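/* Top-level key/mask parser: fills both the key and its mask from the
 * netlink attributes, dispatching per ethertype and IP protocol. With
 * QinQ, the outer tag lands in 'vlan' and the inner tag in 'cvlan'.
 */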
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}

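/* Helpers for building a per-mask flow dissector: a key member is added
 * to the dissector's key list only if the mask enables any of its bits,
 * so dissection does the minimum work needed for this mask.
 */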
1248#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
zhong jiangcb205a82018-09-19 19:32:11 +08001249#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
Jiri Pirko77b99002015-05-12 14:56:21 +02001250
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001251#define FL_KEY_IS_MASKED(mask, member) \
1252 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1253 0, FL_KEY_MEMBER_SIZE(member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001254
1255#define FL_KEY_SET(keys, cnt, id, member) \
1256 do { \
1257 keys[cnt].key_id = id; \
1258 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1259 cnt++; \
1260 } while(0);
1261
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001262#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001263 do { \
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001264 if (FL_KEY_IS_MASKED(mask, member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001265 FL_KEY_SET(keys, cnt, id, member); \
1266 } while(0);
1267
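/* Build the per-mask dissector: control and basic are always dissected,
 * every other key is included only if its mask has at least one nonzero
 * byte, so the fast path never dissects more than some filter can match.
 */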
static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	if (FL_KEY_IS_MASKED(mask, tp) ||
	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);

	skb_flow_dissector_init(dissector, keys, cnt);
}

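/* Promote the caller's stack-allocated mask to a heap copy with its own
 * filter hash table and dissector, then swap it into head->ht in place
 * of the temporary node inserted by fl_check_assign_mask().
 */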
static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	/* Wait until any potential concurrent users of mask are finished */
	synchronize_rcu();

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}

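/* Find or create the mask shared by all filters with the same masked
 * layout. On success fnew->mask holds a reference; -EAGAIN asks the
 * caller to retry after a concurrent mask delete, and changing the mask
 * of an existing filter (fold) is rejected with -EINVAL.
 */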
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero. It is safe to insert
	 * stack-allocated 'mask' to masks hash table because we call
	 * synchronize_rcu() before returning from this function (either in case
	 * of error or after replacing it with heap-allocated mask in
	 * fl_create_new_mask()).
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	/* Wait until any potential concurrent users of mask are finished */
	synchronize_rcu();
	return ret;
}

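/* Parse actions and match attributes into the new filter. Class binding
 * still requires rtnl, so it is taken here only when the caller does not
 * already hold it.
 */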
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (!rtnl_held)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (!rtnl_held)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}

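/* Try to insert the new filter into its mask's hash table. *in_ht tells
 * the caller whether the node went in; a duplicate key is tolerated only
 * when an existing filter is being overwritten.
 */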
static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}

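/* Create or update a filter. This can run without rtnl (see the
 * TCF_PROTO_OPS_DOIT_UNLOCKED flag below), so every step that can race
 * with another writer either takes tp->lock or returns -EAGAIN so that
 * cls_api retries the whole operation.
 */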
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca[TCA_OPTIONS]) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, rtnl_held, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
	 * proto again or create new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		if (handle) {
			/* user specifies a handle and it doesn't exist */
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		} else {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    INT_MAX, GFP_ATOMIC);
		}
		if (err)
			goto errout_hw;

		refcount_inc(&fnew->refcnt);
		fnew->handle = handle;
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	kfree(mask);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);
errout:
	__fl_put(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	kfree(mask);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}

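/* *last tells cls_api whether the tcf_proto itself can go away: it is
 * true only once no masks (and hence no filters) remain.
 */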
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			break;
		}
		__fl_put(f);
		arg->cookie++;
		arg->count++;
	}
}

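/* Walk head->hw_filters under tp->lock and return the next filter with
 * a reference taken. When adding rules, filters already marked deleted
 * are skipped.
 */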
static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}

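/* Replay every offloaded filter through the given hardware callback,
 * e.g. when a block is bound to or unbound from a device, building the
 * same flow_rule representation used on the insert path.
 */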
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
		kfree(cls_flower.rule);

		if (err) {
			if (add && tc_skip_sw(f->flags)) {
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		spin_lock(&tp->lock);
		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
					  add);
		spin_unlock(&tp->lock);
next_flow:
		__fl_put(f);
	}

	return 0;
}

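/* Template offload: pass the mask layout that future filters on this
 * chain will use down to drivers. As the comment inside notes, this is
 * only a hint and driver failures are ignored.
 */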
static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	kfree(cls_flower.rule);

	return 0;
}

static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
}

static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}

static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}

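/* Emit a value/mask attribute pair, skipping fields whose mask is all
 * zeroes; passing TCA_FLOWER_UNSPEC as mask_type suppresses the mask
 * attribute.
 */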
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.src)))
		return -1;

	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

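/* Reverse of fl_set_key(): translate a key/mask pair back into the
 * netlink attributes userspace expects in a dump.
 */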
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

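/* Dump a single filter. The key and flags are read under tp->lock since
 * an unlocked writer may be replacing them concurrently; hardware stats
 * are fetched after the lock is dropped.
 */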
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

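/* TCF_PROTO_OPS_DOIT_UNLOCKED lets cls_api invoke these ops without
 * holding rtnl; flower does its own synchronization with tp->lock, RCU
 * and per-filter/per-mask refcounts, as seen above.
 */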
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");