/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

struct tcf_block_cb;
bool tcf_queue_work(struct work_struct *work);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv);
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
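
/* Illustrative sketch (cls_u32-style; not part of this API): classifiers
 * call tcf_bind_filter() when userspace supplied a classid for the filter,
 * and tcf_unbind_filter() when the filter is removed:
 *
 *	if (tb[TCA_U32_CLASSID]) {
 *		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
 *		tcf_bind_filter(tp, &n->res, base);
 *	}
 *
 * On a shared block (tp->chain->block->q == NULL) both helpers are no-ops,
 * as noted in the comment above.
 */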

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
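
/* Illustrative sketch (not from this header): a classifier typically embeds
 * struct tcf_exts in its per-filter state and wires the action/police fields
 * to its own TLV types, roughly in cls_u32 style:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0)
 *		goto errout;
 *
 * The surrounding variable names and error handling are only an example of
 * the expected call pattern, not a fixed recipe.
 */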

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
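
/* Illustrative sketch of the expected pairing (foo_* names are hypothetical):
 * a classifier that frees filters asynchronously grabs a netns reference
 * before deferring, and otherwise frees synchronously:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		call_rcu(&f->rcu, foo_destroy_filter_rcu);
 *	else
 *		foo_destroy_filter(f);
 *
 * The deferred path then ends with tcf_exts_destroy(&f->exts) followed by
 * tcf_exts_put_net(&f->exts) before freeing the filter itself.
 */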

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
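
/* Illustrative sketch (foo_* names are hypothetical): a classifier's
 * ->classify() callback typically returns a negative value when no filter
 * matched, otherwise it fills *res from the matched filter and lets the
 * attached actions confirm or override the verdict:
 *
 *	static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *				struct tcf_result *res)
 *	{
 *		struct foo_filter *f = foo_lookup(tp, skb);
 *
 *		if (!f)
 *			return -1;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */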

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
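
/* Illustrative sketch modelled on the in-tree ematch modules: an ematch
 * module fills in struct tcf_ematch_ops and registers it at module init,
 * unregistering it again at exit (em_foo_* and TCF_EM_FOO are placeholders):
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind		= TCF_EM_FOO,
 *		.datalen	= sizeof(struct tcf_em_foo),
 *		.match		= em_foo_match,
 *		.owner		= THIS_MODULE,
 *		.link		= LIST_HEAD_INIT(em_foo_ops.link),
 *	};
 *
 *	static int __init init_em_foo(void)
 *	{
 *		return tcf_em_register(&em_foo_ops);
 *	}
 *
 *	static void __exit exit_em_foo(void)
 *	{
 *		tcf_em_unregister(&em_foo_ops);
 *	}
 */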

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
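
/* Illustrative sketch (loosely following how cls_basic and cls_flow use
 * this helper): a classify loop skips filters whose ematch tree does not
 * match and lets the first matching filter decide the result:
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 *	return -1;
 */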

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb->data;
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
};

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
}

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
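
/* Illustrative sketch of how these flag helpers combine with
 * tc_setup_cb_call() when offloading a filter (error handling trimmed,
 * loosely following the flower offload path; not a fixed recipe):
 *
 *	bool skip_sw = tc_skip_sw(f->flags);
 *	int err;
 *
 *	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 *			       &cls_flower, skip_sw);
 *	if (err < 0)
 *		return err;
 *	else if (err > 0)
 *		f->flags |= TCA_CLS_FLAGS_IN_HW;
 *
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		return -EINVAL;
 */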

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from userspace to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case a prio qdisc is offloaded and is then changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};
#endif