/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
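
/* Example (an illustrative sketch, not part of this API): a classifier's
 * ->walk() implementation is expected to honour the skip/count bookkeeping
 * and set the stop flag when the callback asks to abort. The my_cls_*
 * names are hypothetical:
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 *	{
 *		struct my_cls_head *head = rtnl_dereference(tp->root);
 *		struct my_cls_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, link) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 */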

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put(struct tcf_chain *chain);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
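
/* Example (an illustrative sketch): a classful qdisc typically acquires a
 * block in its ->init() and releases it in ->destroy(). The names
 * my_sched_data, my_qdisc_init and my_qdisc_destroy are hypothetical:
 *
 *	static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void my_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */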

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
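
/* Example (an illustrative sketch): a driver handling a TC_BLOCK_BIND
 * command registers a callback on the block so it sees all classifier
 * offload requests made against that block; my_setup_tc_block_cb is a
 * hypothetical driver callback:
 *
 *	static int my_block_bind(struct net_device *dev,
 *				 struct tc_block_offload *f)
 *	{
 *		return tcf_block_cb_register(f->block, my_setup_tc_block_cb,
 *					     dev, dev, f->extack);
 *	}
 */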

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
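
/* Example (an illustrative sketch): a classifier's ->change() binds the
 * configured classid once the new filter state is ready, and unbinds it on
 * deletion. TCA_MY_CLASSID and the filter layout are hypothetical:
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 *	if (err < 0)
 *		return err;
 *	if (tb[TCA_MY_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_MY_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 */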

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
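
/* Example (an illustrative sketch): classifiers embed a struct tcf_exts in
 * their per-filter state and must check tcf_exts_init() for failure, since
 * it allocates the actions array. my_cls_filter, TCA_MY_ACT and
 * TCA_MY_POLICE are hypothetical:
 *
 *	struct my_cls_filter *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOBUFS;
 *	err = tcf_exts_init(&f->exts, TCA_MY_ACT, TCA_MY_POLICE);
 *	if (err < 0) {
 *		kfree(f);
 *		return err;
 *	}
 */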

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
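
/* Example (an illustrative sketch): the usual deletion pattern defers the
 * final free to a workqueue while the netns is still alive, and falls back
 * to synchronous cleanup when it is being torn down. The my_cls_* names
 * and the rwork member are hypothetical:
 *
 *	static void my_cls_delete(struct my_cls_filter *f)
 *	{
 *		if (tcf_exts_get_net(&f->exts))
 *			tcf_queue_work(&f->rwork, my_cls_destroy_work);
 *		else
 *			__my_cls_destroy(f);
 *	}
 */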

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
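
/* Example (an illustrative sketch): a classifier's ->classify() runs its
 * match logic and then hands the verdict to the actions via
 * tcf_exts_exec(). my_cls_match is hypothetical:
 *
 *	static int my_cls_classify(struct sk_buff *skb,
 *				   const struct tcf_proto *tp,
 *				   struct tcf_result *res)
 *	{
 *		struct my_cls_filter *f = my_cls_match(tp, skb);
 *
 *		if (!f)
 *			return -1;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */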

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data examined by the ematch
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
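
/* Example (an illustrative sketch): a classifier that supports ematches
 * validates the tree at configuration time and evaluates it per packet.
 * TCA_MY_EMATCHES and the ematches member are hypothetical:
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_MY_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *
 *	...
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */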

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
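
/* Example (an illustrative sketch): classifiers resolve the configured
 * interface name to an ifindex once, at change time, and compare it
 * against skb->skb_iif on the fast path. TCA_MY_INDEV and the ifindex
 * member are hypothetical:
 *
 *	if (tb[TCA_MY_INDEV]) {
 *		int ret = tcf_change_indev(net, tb[TCA_MY_INDEV], extack);
 *
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *	}
 *
 *	...
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		return -1;
 */
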
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
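
/* Example (an illustrative sketch): a driver's block callback gates every
 * classifier offload request on device capability and chain 0 support.
 * my_setup_flower is hypothetical:
 *
 *	static int my_setup_tc_block_cb(enum tc_setup_type type,
 *					void *type_data, void *cb_priv)
 *	{
 *		struct net_device *dev = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER: {
 *			struct tc_cls_flower_offload *f = type_data;
 *
 *			if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *				return -EOPNOTSUPP;
 *			return my_setup_flower(dev, f);
 *		}
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */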

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from userspace to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};
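
/* Example (an illustrative sketch): a driver receiving a RED qdisc offload
 * request dispatches on the command and either programs the hardware or
 * fills in the requested statistics. The my_red_* helpers are
 * hypothetical:
 *
 *	struct tc_red_qopt_offload *opt = type_data;
 *
 *	switch (opt->command) {
 *	case TC_RED_REPLACE:
 *		return my_red_replace(dev, opt->handle, &opt->set);
 *	case TC_RED_DESTROY:
 *		return my_red_destroy(dev, opt->handle);
 *	case TC_RED_STATS:
 *		return my_red_stats(dev, opt->handle, &opt->stats);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */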

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

#endif