/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct net_device *hw_dev;
};
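
/* Added summary comment: the mask "range" tracks the span of bytes in
 * fl_flow_key that are actually masked (from the first to the last non-zero
 * mask byte, rounded out to long boundaries). Masking, hashing and lookups
 * below only touch that span, one long at a time, which is why fl_flow_key
 * is __aligned(BITS_PER_LONG / 8).
 */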
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
				       struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&head->ht,
				      fl_key_get_start(mkey, &head->mask),
				      head->ht_params);
}
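
/* Added summary comment: software fast path. The skb is dissected into a
 * flow key, ANDed with the single mask installed for this classifier
 * instance, and the masked key is looked up in the rhashtable. Filters with
 * skip_sw set are never inserted into the table and are skipped here.
 */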
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect_tunnel_info(skb, &head->dissector, &skb_key);
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = fl_lookup(head, &skb_mkey);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return 0;
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);

	rtnl_lock();
	__fl_destroy_filter(f);
	rtnl_unlock();
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	INIT_WORK(&f->work, fl_destroy_filter_work);
	tcf_queue_work(&f->work);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
	tcf_block_offload_dec(block, &f->flags);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f,
				struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.dissector = dissector;
	cls_flower.mask = mask;
	cls_flower.key = &f->mkey;
	cls_flower.exts = &f->exts;
	cls_flower.classid = f->res.classid;

	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			       &cls_flower, skip_sw);
	if (err < 0) {
		fl_hw_destroy_filter(tp, f);
		return err;
	} else if (err > 0) {
		tcf_block_offload_inc(block, &f->flags);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.exts = &f->exts;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
}

static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	if (tcf_exts_get_net(&f->exts))
		call_rcu(&f->rcu, fl_destroy_filter);
	else
		__fl_destroy_filter(f);
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	return idr_find_ext(&head->handle_idr, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
};
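
/* Added summary comment: most flower keys come as value/mask netlink
 * attribute pairs. If the mask attribute is absent, or the key has no mask
 * attribute at all (mask_type == TCA_FLOWER_UNSPEC), the value is treated as
 * an exact match by filling the mask with all ones.
 */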
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
		       &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
		       sizeof(key->tos));

	fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
		       &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
		       sizeof(key->ttl));
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
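
/* Added summary comment: the flow_dissector key list is built from the mask.
 * The control and basic keys are always present; every other dissector key
 * is added only when at least one byte of the corresponding fl_flow_key
 * member is masked, so the fast path dissects no more of the packet than the
 * installed filters can actually match on.
 */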
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
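
/* Added summary comment: this version of the classifier supports a single
 * mask per tcf_proto instance. The first filter's mask is copied into the
 * head and used to size the rhashtable key; a later filter whose mask
 * differs is rejected with -EINVAL.
 */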
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	return 0;
}
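
/* Added summary comment: create or replace a filter. A handle is taken from
 * the IDR (or the user-supplied one is reserved), key/mask and actions are
 * parsed from netlink attributes, the mask is checked against the instance
 * mask, and the filter is then inserted into the software hash table and/or
 * offloaded to hardware according to its skip_sw/skip_hw flags.
 */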
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct nlattr **tb;
	struct fl_flow_mask mask = {};
	unsigned long idr_index;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return -ENOBUFS;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
				    1, 0x80000000, GFP_KERNEL);
		if (err)
			goto errout;
		fnew->handle = idr_index;
	}

	/* user specifies a handle and it doesn't exist */
	if (handle && !fold) {
		err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
				    handle, handle + 1, GFP_KERNEL);
		if (err)
			goto errout;
		fnew->handle = idr_index;
	}

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout_idr;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
			   extack);
	if (err)
		goto errout_idr;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout_idr;

	if (!tc_skip_sw(fnew->flags)) {
		if (!fold && fl_lookup(head, &fnew->mkey)) {
			err = -EEXIST;
			goto errout_idr;
		}

		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout_idr;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew,
					   extack);
		if (err)
			goto errout_idr;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = fnew;

	if (fold) {
		fnew->handle = handle;
		idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	kfree(tb);
	return 0;

errout_idr:
	if (fnew->handle)
		idr_remove_ext(&head->handle_idr, fnew->handle);
errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
errout_tb:
	kfree(tb);
	return err;
}

static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	__fl_delete(tp, f);
	*last = list_empty(&head->filters);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
			    TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
			    TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			    sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");