#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain);
void tcf_block_put(struct tcf_block *block);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
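
/*
 * Illustrative sketch (not part of this header): a qdisc typically obtains a
 * filter block at init time and runs the attached classifier chain from its
 * enqueue path. Field names such as q->block and q->filter_list are
 * hypothetical placeholders for the qdisc's own private state.
 *
 *	err = tcf_block_get(&q->block, &q->filter_list);
 *	if (err)
 *		return err;
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int ret = tcf_classify(skb, fl, &res, false);
 *	switch (ret) {
 *	case TC_ACT_SHOT:
 *		... drop the packet ...
 *	default:
 *		... use res.classid to pick a class ...
 *	}
 *
 *	tcf_block_put(q->block);	// on qdisc destroy
 */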

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
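
/*
 * Illustrative sketch (not part of this header): a classifier's change()
 * handler binds the configured classid to a class of the owning qdisc, and
 * its delete/destroy paths undo the binding. The filter struct and the
 * TCA_FOO_CLASSID attribute shown here are hypothetical.
 *
 *	if (tb[TCA_FOO_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *	...
 *	tcf_unbind_filter(tp, &f->res);	// when the filter is removed
 */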

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
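
/*
 * Illustrative sketch (not part of this header): the usual tcf_exts life
 * cycle inside a classifier's change() handler. The attribute indices
 * TCA_FOO_ACT/TCA_FOO_POLICE and the filter struct are hypothetical; the
 * call sequence mirrors what in-tree classifiers do.
 *
 *	struct tcf_exts e;
 *	int err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(tp, &f->exts, &e);	// commit into the filter
 */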

/**
 * tcf_exts_is_predicative - check if a predicative extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if a predicative extension is present, i.e. an extension which
 * might cause further actions and thus overrule the regular tcf_result.
 */
static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return 0;
#endif
}

/**
 * tcf_exts_is_available - check if at least one extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if at least one extension is present.
 */
static inline int
tcf_exts_is_available(struct tcf_exts *exts)
{
	/* All non-predicative extensions must be added here. */
	return tcf_exts_is_predicative(exts);
}

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns 0 on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->nr_actions)
		return tcf_action_exec(skb, exts->actions, exts->nr_actions,
				       res);
#endif
	return 0;
}
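
/*
 * Illustrative sketch (not part of this header): how a classifier's
 * classify() hook typically consumes tcf_exts_exec(). The filter list and
 * field names are hypothetical but follow the common in-tree pattern.
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;	// treat as unmatched, try next filter
 *		return r;		// 0 or a TC_ACT_* code for the qdisc
 *	}
 *	return -1;
 */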

#ifdef CONFIG_NET_CLS_ACT

#define tc_no_actions(_exts)  ((_exts)->nr_actions == 0)
#define tc_single_action(_exts) ((_exts)->nr_actions == 1)

#else /* CONFIG_NET_CLS_ACT */

#define tc_no_actions(_exts) true
#define tc_single_action(_exts) false

#endif /* CONFIG_NET_CLS_ACT */

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_change - replace ematch tree of a running classifier
 *
 * @tp: classifier kind handle
 * @dst: destination ematch tree variable
 * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
 *
 * This function replaces the ematch tree in @dst with the ematch
 * tree in @src. The classifier in charge of the ematch tree may be
 * running.
 */
static inline void tcf_em_tree_change(struct tcf_proto *tp,
				      struct tcf_ematch_tree *dst,
				      struct tcf_ematch_tree *src)
{
	tcf_tree_lock(tp);
	memcpy(dst, src, sizeof(*dst));
	tcf_tree_unlock(tp);
}
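
/*
 * Illustrative sketch (not part of this header): how a classifier typically
 * builds and installs an ematch tree when a filter is (re)configured. The
 * attribute index TCA_FOO_EMATCHES and the filter struct are hypothetical.
 *
 *	struct tcf_ematch_tree t;
 *	int err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &t);
 *	if (err < 0)
 *		return err;
 *	tcf_em_tree_change(tp, &f->ematches, &t);	// swap in the new tree
 *	...
 *	tcf_em_tree_destroy(&f->ematches);		// on filter teardown
 */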

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel; otherwise
 * 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb->data;
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
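
/*
 * Illustrative sketch (not part of this header): ematch modules combine the
 * two helpers above to read packet bytes at a user-configured layer and
 * offset. The cfg struct shown is a hypothetical ematch configuration.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, cfg->layer) + cfg->off;
 *
 *	if (!tcf_valid_offset(skb, ptr, cfg->len))
 *		return 0;	// offset runs past the packet, no match
 *	return !memcmp(ptr, cfg->pattern, cfg->len);
 */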

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
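
/*
 * Illustrative sketch (not part of this header): classifiers that support
 * matching on the incoming device resolve the netlink attribute to an
 * ifindex at configuration time and compare it per packet. TCA_FOO_INDEV
 * and f->ifindex are hypothetical names.
 *
 *	ret = tcf_change_indev(net, tb[TCA_FOO_INDEV]);	// config path
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *	...
 *	if (!tcf_match_indev(skb, f->ifindex))		// classify path
 *		continue;
 */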
#endif /* CONFIG_NET_CLS_IND */

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev,
				  const struct tcf_proto *tp)
{
	const struct Qdisc *sch = tp->q;
	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	if (cops && cops->tcf_cl_offload)
		return cops->tcf_cl_offload(tp->classid);

	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_should_offload(const struct net_device *dev,
				     const struct tcf_proto *tp, u32 flags)
{
	if (tc_skip_hw(flags))
		return false;
	return tc_can_offload(dev, tp);
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
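
/*
 * Illustrative sketch (not part of this header): how a classifier commonly
 * consumes the SKIP_HW/SKIP_SW flag helpers when programming hardware. The
 * foo_hw_replace() helper stands in for the classifier's own offload call.
 *
 *	if (!tc_flags_valid(flags))
 *		return -EINVAL;		// SKIP_HW and SKIP_SW both set
 *
 *	if (tc_should_offload(dev, tp, flags)) {
 *		err = foo_hw_replace(dev, tp, ...);
 *		if (!err)
 *			flags |= TCA_CLS_FLAGS_IN_HW;
 *	}
 *	if (tc_skip_sw(flags) && !tc_in_hw(flags))
 *		return err;		// hardware-only filter failed to install
 */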

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	enum tc_fl_command command;
	u32 prio;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* This structure holds the cookie that is passed from userspace
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};
#endif