/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

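/* Flow key used both for filter configuration and for packet classification.
 * Everything that can be matched on lives in this one structure; masked
 * lookups compare it long-by-long (see the __aligned() annotation below).
 */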
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head	rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head	rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

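/* Compute the smallest [start, end) byte range that covers every non-zero
 * byte in the mask, rounded out to long boundaries. Hashtable lookups and
 * key comparisons then only touch this range of the key.
 */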
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

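/* Software classification path: build a flow key from the skb (including
 * tunnel metadata when present), mask it with the single mask shared by all
 * filters and look the result up in the rhashtable.
 */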
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

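/* Hardware offload helpers: each one builds a tc_cls_flower_offload command
 * (REPLACE, DESTROY or STATS) and passes it to the device behind the qdisc
 * via ndo_setup_tc(), provided the device is capable of offloading.
 */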
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = (unsigned long)f;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;
	int err;

	if (!tc_can_offload(dev, tp))
		return tc_skip_sw(f->flags) ? -EINVAL : 0;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = (unsigned long)f;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->key;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    &tc);

	if (tc_skip_sw(f->flags))
		return err;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

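/* Parse the TCA_FLOWER_* netlink attributes into key and mask.  A value
 * attribute supplied without its *_MASK counterpart is treated as an exact
 * match (all-ones mask), see fl_set_key_val() above.
 */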
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

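/* Register only the flow dissector keys whose mask is non-zero, so the
 * software fast path dissects no more headers than the installed filters
 * actually match on.
 */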
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

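/* The classifier supports a single mask per instance: the first filter's
 * mask is assigned and sizes the hashtable; later filters must use an
 * identical mask or insertion fails with -EINVAL.
 */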
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* The mask is not assigned yet, so assign it and initialize the
	 * hashtable accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

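/* Insert or replace a filter: validate flags and parameters, add the new
 * filter to the software hashtable (unless skip_sw) and offload it to
 * hardware (unless skip_hw), then swap out any replaced filter under RCU.
 */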
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	if (fold) {
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	__fl_delete(tp, f);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			    sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			    sizeof(key->enc_ipv6.src)) ||
		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
			    TCA_FLOWER_KEY_ENC_IPV6_DST,
			    &mask->enc_ipv6.dst,
			    TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			    sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");