blob: 60d39789e4f07d19269df1acf2c57d1d6058b348 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __NET_PKT_CLS_H
2#define __NET_PKT_CLS_H
3
4#include <linux/pkt_cls.h>
5#include <net/sch_generic.h>
6#include <net/act_api.h>
7
8/* Basic packet classifier frontend definitions. */
9
/* Walker state for iterating all filters of a classifier; @fn is
 * invoked per filter node.
 * NOTE(review): stop/skip/count follow the usual tc walk convention
 * (abort flag, entries to skip, entries visited) — confirm against
 * the cls_* walk implementations.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
16
Joe Perches5c152572013-07-30 22:47:13 -070017int register_tcf_proto_ops(struct tcf_proto_ops *ops);
18int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070019
Jiri Pirko8ae70032017-02-15 11:57:50 +010020#ifdef CONFIG_NET_CLS
WANG Cong367a8ce2017-05-23 09:42:37 -070021struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
22 bool create);
Jiri Pirko5bc17012017-05-17 11:08:01 +020023void tcf_chain_put(struct tcf_chain *chain);
Jiri Pirko6529eab2017-05-17 11:07:55 +020024int tcf_block_get(struct tcf_block **p_block,
25 struct tcf_proto __rcu **p_filter_chain);
26void tcf_block_put(struct tcf_block *block);
Jiri Pirko87d83092017-05-17 11:07:54 +020027int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
28 struct tcf_result *res, bool compat_mode);
29
Jiri Pirko8ae70032017-02-15 11:57:50 +010030#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	/* Classifiers compiled out: report success so qdiscs can still
	 * initialize without a filter block.
	 */
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
	/* Nothing to release without CONFIG_NET_CLS. */
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	/* No classification available: tell the caller to proceed as if
	 * no filter matched.
	 */
	return TC_ACT_UNSPEC;
}
Jiri Pirko8ae70032017-02-15 11:57:50 +010047#endif
Jiri Pirkocf1facd2017-02-09 14:38:56 +010048
/* Atomically replace *clp with cl, returning the previous value.
 * Lockless variant; see cls_set_class() for the tree-locked form.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
54
/* Replace *clp with cl under the qdisc tree lock and return the old
 * class. NOTE(review): the tree lock presumably serializes against
 * concurrent filter/class changes — confirm against tcf_tree_lock()
 * users.
 */
static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}
67
/* Bind a filter result to the class named by r->classid.
 *
 * The qdisc's class ops translate the classid into an internal class
 * reference; any class previously stored in r->class is unbound.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
78
79static inline void
80tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
81{
82 unsigned long cl;
83
84 if ((cl = __cls_set_class(&r->class, 0)) != 0)
85 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
86}
87
/* A classifier's attached actions plus the TLV types used to dump
 * them.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int	nr_actions;		/* entries used in @actions */
	struct tc_action **actions;	/* TCA_ACT_MAX_PRIO slots, see tcf_exts_init() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
100
/* tcf_exts_init - initialize a tc filter extension set
 * @exts: extension set to initialize
 * @action: TLV type used when dumping the action list
 * @police: TLV type used when dumping a police action
 *
 * Allocates the action pointer array (TCA_ACT_MAX_PRIO slots) when
 * CONFIG_NET_CLS_ACT is enabled. Returns 0 on success or -ENOMEM if
 * the allocation fails.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
115
/* Append every action of @exts to the list @actions, preserving
 * order. No-op when CONFIG_NET_CLS_ACT is disabled.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}
129
/* Propagate hardware-gathered counters to every action of @exts.
 * @bytes, @packets: accumulated byte and packet counts
 * @lastuse: timestamp of last use
 *
 * NOTE(review): preemption is disabled around the loop — presumably
 * because tcf_action_stats_update() touches per-cpu counters; confirm
 * against its implementation.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
148
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149/**
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200150 * tcf_exts_has_actions - check if at least one action is present
151 * @exts: tc filter extensions handle
152 *
153 * Returns true if at least one action is present.
154 */
155static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
156{
WANG Cong2734437e2016-08-13 22:34:59 -0700157#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200158 return exts->nr_actions;
159#else
160 return false;
161#endif
162}
WANG Cong2734437e2016-08-13 22:34:59 -0700163
/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present; always false when
 * CONFIG_NET_CLS_ACT is disabled.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}
WANG Cong2734437e2016-08-13 22:34:59 -0700178
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* No actions compiled in: accept the packet unmodified. */
	return TC_ACT_OK;
}
199
Joe Perches5c152572013-07-30 22:47:13 -0700200int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
201 struct nlattr **tb, struct nlattr *rate_tlv,
Cong Wang2f7ef2f2014-04-25 13:54:06 -0700202 struct tcf_exts *exts, bool ovr);
WANG Cong18d02642014-09-25 10:26:37 -0700203void tcf_exts_destroy(struct tcf_exts *exts);
Jiri Pirko9b0d4442017-08-04 14:29:15 +0200204void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
WANG Cong5da57f42013-12-15 20:15:07 -0800205int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
206int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
/**
 * struct tcf_pkt_info - packet information
 * @ptr: pointer into the packet data
 * @nexthdr: next-header hint — semantics defined by the ematch users
 *           (NOTE(review): confirm against em_* modules)
 */
struct tcf_pkt_info {
	unsigned char *	ptr;
	int		nexthdr;
};
215
216#ifdef CONFIG_NET_EMATCH
217
218struct tcf_ematch_ops;
219
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace (passed to the module's change/destroy ops)
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
237
/* True if the ematch is a container, i.e. has no module attached. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* True if TCF_EM_SIMPLE is set on the match. */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* True if the match result must be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True if this is the last ematch of its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
257
258static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
259{
260 if (tcf_em_last_match(em))
261 return 1;
262
263 if (result == 0 && em->flags & TCF_EM_REL_AND)
264 return 1;
265
266 if (result != 0 && em->flags & TCF_EM_REL_OR)
267 return 1;
268
269 return 0;
270}
271
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches (@hdr.nmatches entries)
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};
283
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
308
Joe Perches5c152572013-07-30 22:47:13 -0700309int tcf_em_register(struct tcf_ematch_ops *);
310void tcf_em_unregister(struct tcf_ematch_ops *);
311int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
312 struct tcf_ematch_tree *);
John Fastabend82a470f2014-10-05 21:27:53 -0700313void tcf_em_tree_destroy(struct tcf_ematch_tree *);
Joe Perches5c152572013-07-30 22:47:13 -0700314int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
315int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
316 struct tcf_pkt_info *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317
318/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319 * tcf_em_tree_match - evaulate an ematch tree
320 *
321 * @skb: socket buffer of the packet in question
322 * @tree: ematch tree to be used for evaluation
323 * @info: packet information examined by classifier
324 *
325 * This function matches @skb against the ematch tree in @tree by going
326 * through all ematches respecting their logic relations returning
327 * as soon as the result is obvious.
328 *
329 * Returns 1 if the ematch tree as-one matches, no ematches are configured
330 * or ematch is not enabled in the kernel, otherwise 0 is returned.
331 */
332static inline int tcf_em_tree_match(struct sk_buff *skb,
333 struct tcf_ematch_tree *tree,
334 struct tcf_pkt_info *info)
335{
336 if (tree->hdr.nmatches)
337 return __tcf_em_tree_match(skb, tree, info);
338 else
339 return 1;
340}
341
Patrick McHardydb3d99c2007-07-11 19:46:26 -0700342#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
343
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344#else /* CONFIG_NET_EMATCH */
345
/* CONFIG_NET_EMATCH disabled: empty tree handle plus no-op fallbacks
 * so callers need no #ifdefs. tcf_em_tree_match() always reports a
 * match in this configuration.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
353
354#endif /* CONFIG_NET_EMATCH */
355
356static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
357{
358 switch (layer) {
359 case TCF_LAYER_LINK:
360 return skb->data;
361 case TCF_LAYER_NETWORK:
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700362 return skb_network_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363 case TCF_LAYER_TRANSPORT:
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -0700364 return skb_transport_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 }
366
367 return NULL;
368}
369
/* Check that [ptr, ptr + len) lies entirely within the skb's linear
 * data area. The final (ptr <= ptr + len) term guards against
 * wrap-around of the pointer arithmetic for huge @len values.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
377
378#ifdef CONFIG_NET_CLS_IND
Denis V. Lunev0eeb8ff2007-12-04 01:15:45 -0800379#include <net/net_namespace.h>
380
/* Resolve an indev TLV (interface name attribute) to an ifindex.
 *
 * Returns the positive ifindex on success, -EINVAL if the name does
 * not fit in IFNAMSIZ, or -ENODEV if no such device exists in @net.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394
WANG Cong2519a602014-01-09 16:14:02 -0800395static inline bool
396tcf_match_indev(struct sk_buff *skb, int ifindex)
397{
398 if (!ifindex)
399 return true;
400 if (!skb->skb_iif)
401 return false;
402 return ifindex == skb->skb_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403}
404#endif /* CONFIG_NET_CLS_IND */
405
Jiri Pirko717503b2017-10-11 09:41:09 +0200406int tc_setup_cb_call(struct tcf_exts *exts, enum tc_setup_type type,
407 void *type_data, bool err_stop);
408
/* Fields common to all classifier hardware-offload requests. */
struct tc_cls_common_offload {
	u32 chain_index;	/* chain the filter lives on */
	__be16 protocol;	/* filter protocol, network byte order */
	u32 prio;		/* filter priority */
	u32 classid;		/* classid of the owning tcf_proto */
};

/* Populate @cls_common from the classifier instance @tp. */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	cls_common->classid = tp->classid;
}
425
/* Key-node description for cls_u32 hardware offload.
 * NOTE(review): field semantics mirror struct tc_u_knode in cls_u32 —
 * confirm against net/sched/cls_u32.c.
 */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8  fshift;
};

/* Hash-node description for cls_u32 hardware offload. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

/* Commands a driver can receive for cls_u32 offload. */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* Argument block passed to drivers for cls_u32 offload; @command
 * selects which union member is valid.
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
460
Jiri Pirko7b06e8a2017-08-09 14:30:35 +0200461static inline bool tc_can_offload(const struct net_device *dev)
John Fastabend6843e7a2016-02-26 07:53:49 -0800462{
John Fastabend2b6ab0d2016-02-26 07:54:13 -0800463 if (!(dev->features & NETIF_F_HW_TC))
464 return false;
John Fastabend9e8ce792016-02-26 07:54:39 -0800465 if (!dev->netdev_ops->ndo_setup_tc)
466 return false;
John Fastabend9e8ce792016-02-26 07:54:39 -0800467 return true;
John Fastabend6843e7a2016-02-26 07:53:49 -0800468}
469
Hadar Hen Zion55330f02016-12-01 14:06:33 +0200470static inline bool tc_skip_hw(u32 flags)
471{
472 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
473}
474
Jiri Pirko7b06e8a2017-08-09 14:30:35 +0200475static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
Hadar Hen Zion55330f02016-12-01 14:06:33 +0200476{
477 if (tc_skip_hw(flags))
478 return false;
Jiri Pirko7b06e8a2017-08-09 14:30:35 +0200479 return tc_can_offload(dev);
Hadar Hen Zion55330f02016-12-01 14:06:33 +0200480}
481
Samudrala, Sridhard34e3e12016-05-12 17:08:23 -0700482static inline bool tc_skip_sw(u32 flags)
483{
484 return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
485}
486
487/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
488static inline bool tc_flags_valid(u32 flags)
489{
490 if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
491 return false;
492
493 if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
494 return false;
495
496 return true;
497}
498
Or Gerlitze6960282017-02-16 10:31:12 +0200499static inline bool tc_in_hw(u32 flags)
500{
501 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
502}
503
/* Commands a driver can receive for cls_flower offload. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* Argument block passed to drivers for cls_flower offload. */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;		/* identifies the rule to the driver */
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};

/* Commands a driver can receive for cls_matchall offload. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* Argument block passed to drivers for cls_matchall offload. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;		/* identifies the rule to the driver */
};
531
/* Commands a driver can receive for cls_bpf offload. */
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* Argument block passed to drivers for cls_bpf offload. */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;	/* actions embedded in the BPF program */
	u32 gen_flags;
};

/* Qdisc-level mqprio offload description passed to ndo_setup_tc. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;	/* opaque bytes supplied by userspace */
	u32 len;	/* length of @data */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700566#endif