/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
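
/* Illustrative sketch (not part of this header): a classifier's teardown path
 * typically pairs tcf_exts_get_net() with deferred destruction, e.g.:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_cls_destroy_work);
 *	else
 *		__my_cls_destroy(f);
 *
 * "f", "my_cls_destroy_work" and "__my_cls_destroy" are hypothetical
 * classifier-private names; the work function is expected to call
 * tcf_exts_put_net() before freeing the extensions.
 */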
241
WANG Cong22dc13c2016-08-13 22:35:00 -0700242#ifdef CONFIG_NET_CLS_ACT
Cong Wang244cd962018-08-19 12:22:09 -0700243#define tcf_exts_for_each_action(i, a, exts) \
244 for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
245#else
246#define tcf_exts_for_each_action(i, a, exts) \
Arnd Bergmann191672c2018-08-22 17:25:44 +0200247 for (; 0; (void)(i), (void)(a), (void)(exts))
WANG Cong22dc13c2016-08-13 22:35:00 -0700248#endif
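
/* Illustrative sketch (assumption, not taken from this header): iterating the
 * configured actions of a filter, e.g. while building an offload request:
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts)
 *		pr_debug("action %d: %p\n", i, act);
 *
 * When CONFIG_NET_CLS_ACT is disabled the macro expands to a loop that never
 * runs, so callers need no #ifdef of their own.
 */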

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
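
/* Illustrative sketch (hypothetical classifier, not part of this header):
 * a ->classify() implementation usually copies the matching filter's result
 * and then runs its extensions:
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;	// filter is treated as unmatched
 *	return err;		// TC_ACT_OK or another TC_ACT_* code
 *
 * "f" is a hypothetical per-filter structure embedding a struct tcf_exts.
 */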

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch is not enabled in the kernel; otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
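
/* Illustrative sketch (assumption): a classifier that embeds an ematch tree
 * gates its match on it, e.g. inside its ->classify() loop:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;	// try the next filter
 *
 * "f" is a hypothetical per-filter structure; a non-NULL tcf_pkt_info should
 * be passed when the ematch modules in use need packet data.
 */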

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
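
/* Illustrative sketch (hypothetical driver, not from this header): a driver's
 * flow block callback typically starts by rejecting requests it cannot
 * handle:
 *
 *	static int my_setup_cls(struct net_device *dev,
 *				struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *		// ... program the hardware ...
 *	}
 *
 * struct flow_cls_offload (see <net/flow_offload.h>) embeds the
 * flow_cls_common_offload as its "common" member.
 */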

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
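
/* Illustrative sketch (assumption): a classifier building a hardware offload
 * request fills in the common part first and then dispatches it through the
 * block callbacks declared above, e.g. for u32:
 *
 *	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 *	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 *	// ... fill cls_u32.knode ...
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32,
 *			      tc_skip_sw(flags), &flags, &in_hw_count, true);
 *
 * "cls_u32" is a local struct tc_cls_u32_offload; "flags" and "in_hw_count"
 * are hypothetical per-filter fields of the calling classifier.
 */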

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif