blob: b5db0f79db14e048ad55d4f3fb1840fd84ce3712 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/cls_api.c Packet classifier API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Changes:
12 *
13 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
14 *
15 */
16
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/errno.h>
Jiri Pirko33a48922017-02-09 14:38:57 +010022#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/init.h>
25#include <linux/kmod.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090026#include <linux/slab.h>
Jiri Pirko48617382018-01-17 11:46:46 +010027#include <linux/idr.h>
John Hurley7f76fa32018-11-09 21:21:26 -080028#include <linux/rhashtable.h>
Denis V. Lunevb8542722007-12-01 00:21:31 +110029#include <net/net_namespace.h>
30#include <net/sock.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070031#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <net/pkt_sched.h>
33#include <net/pkt_cls.h>
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +010034#include <net/tc_act/tc_pedit.h>
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +010035#include <net/tc_act/tc_mirred.h>
36#include <net/tc_act/tc_vlan.h>
37#include <net/tc_act/tc_tunnel_key.h>
38#include <net/tc_act/tc_csum.h>
39#include <net/tc_act/tc_gact.h>
40#include <net/tc_act/tc_skbedit.h>
41#include <net/tc_act/tc_mirred.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042
Davide Carattie3314732018-10-10 22:00:58 +020043extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
44
Linus Torvalds1da177e2005-04-16 15:20:36 -070045/* The list of all installed classifier types */
WANG Cong36272872013-12-15 20:15:11 -080046static LIST_HEAD(tcf_proto_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
48/* Protects list of registered TC modules. It is pure SMP lock. */
49static DEFINE_RWLOCK(cls_mod_lock);
50
51/* Find classifier type by string name */
52
Jiri Pirkof34e8bf2018-07-23 09:23:04 +020053static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
Linus Torvalds1da177e2005-04-16 15:20:36 -070054{
Eric Dumazetdcd76082013-12-20 10:04:18 -080055 const struct tcf_proto_ops *t, *res = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
57 if (kind) {
58 read_lock(&cls_mod_lock);
WANG Cong36272872013-12-15 20:15:11 -080059 list_for_each_entry(t, &tcf_proto_base, head) {
Jiri Pirko33a48922017-02-09 14:38:57 +010060 if (strcmp(kind, t->kind) == 0) {
Eric Dumazetdcd76082013-12-20 10:04:18 -080061 if (try_module_get(t->owner))
62 res = t;
Linus Torvalds1da177e2005-04-16 15:20:36 -070063 break;
64 }
65 }
66 read_unlock(&cls_mod_lock);
67 }
Eric Dumazetdcd76082013-12-20 10:04:18 -080068 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -070069}
70
Jiri Pirkof34e8bf2018-07-23 09:23:04 +020071static const struct tcf_proto_ops *
72tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
73{
74 const struct tcf_proto_ops *ops;
75
76 ops = __tcf_proto_lookup_ops(kind);
77 if (ops)
78 return ops;
79#ifdef CONFIG_MODULES
80 rtnl_unlock();
81 request_module("cls_%s", kind);
82 rtnl_lock();
83 ops = __tcf_proto_lookup_ops(kind);
84 /* We dropped the RTNL semaphore in order to perform
85 * the module load. So, even if we succeeded in loading
86 * the module we have to replay the request. We indicate
87 * this using -EAGAIN.
88 */
89 if (ops) {
90 module_put(ops->owner);
91 return ERR_PTR(-EAGAIN);
92 }
93#endif
94 NL_SET_ERR_MSG(extack, "TC classifier not found");
95 return ERR_PTR(-ENOENT);
96}
97
Linus Torvalds1da177e2005-04-16 15:20:36 -070098/* Register(unregister) new classifier type */
99
100int register_tcf_proto_ops(struct tcf_proto_ops *ops)
101{
WANG Cong36272872013-12-15 20:15:11 -0800102 struct tcf_proto_ops *t;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103 int rc = -EEXIST;
104
105 write_lock(&cls_mod_lock);
WANG Cong36272872013-12-15 20:15:11 -0800106 list_for_each_entry(t, &tcf_proto_base, head)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 if (!strcmp(ops->kind, t->kind))
108 goto out;
109
WANG Cong36272872013-12-15 20:15:11 -0800110 list_add_tail(&ops->head, &tcf_proto_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 rc = 0;
112out:
113 write_unlock(&cls_mod_lock);
114 return rc;
115}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -0800116EXPORT_SYMBOL(register_tcf_proto_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
Cong Wang7aa00452017-10-26 18:24:28 -0700118static struct workqueue_struct *tc_filter_wq;
119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
121{
WANG Cong36272872013-12-15 20:15:11 -0800122 struct tcf_proto_ops *t;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 int rc = -ENOENT;
124
Daniel Borkmannc78e1742015-05-20 17:13:33 +0200125 /* Wait for outstanding call_rcu()s, if any, from a
126 * tcf_proto_ops's destroy() handler.
127 */
128 rcu_barrier();
Cong Wang7aa00452017-10-26 18:24:28 -0700129 flush_workqueue(tc_filter_wq);
Daniel Borkmannc78e1742015-05-20 17:13:33 +0200130
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131 write_lock(&cls_mod_lock);
Eric Dumazetdcd76082013-12-20 10:04:18 -0800132 list_for_each_entry(t, &tcf_proto_base, head) {
133 if (t == ops) {
134 list_del(&t->head);
135 rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136 break;
Eric Dumazetdcd76082013-12-20 10:04:18 -0800137 }
138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 write_unlock(&cls_mod_lock);
140 return rc;
141}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -0800142EXPORT_SYMBOL(unregister_tcf_proto_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143
Cong Wangaaa908f2018-05-23 15:26:53 -0700144bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
Cong Wang7aa00452017-10-26 18:24:28 -0700145{
Cong Wangaaa908f2018-05-23 15:26:53 -0700146 INIT_RCU_WORK(rwork, func);
147 return queue_rcu_work(tc_filter_wq, rwork);
Cong Wang7aa00452017-10-26 18:24:28 -0700148}
149EXPORT_SYMBOL(tcf_queue_work);
150
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151/* Select new prio value from the range, managed by kernel. */
152
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -0800153static inline u32 tcf_auto_prio(struct tcf_proto *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154{
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -0800155 u32 first = TC_H_MAKE(0xC0000000U, 0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156
157 if (tp)
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000158 first = tp->prio - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
Jiri Pirko79619732017-05-17 11:07:58 +0200160 return TC_H_MAJ(first);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161}
162
Jiri Pirko33a48922017-02-09 14:38:57 +0100163static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
Alexander Aringc35a4ac2018-01-18 11:20:50 -0500164 u32 prio, struct tcf_chain *chain,
165 struct netlink_ext_ack *extack)
Jiri Pirko33a48922017-02-09 14:38:57 +0100166{
167 struct tcf_proto *tp;
168 int err;
169
170 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
171 if (!tp)
172 return ERR_PTR(-ENOBUFS);
173
Jiri Pirkof34e8bf2018-07-23 09:23:04 +0200174 tp->ops = tcf_proto_lookup_ops(kind, extack);
175 if (IS_ERR(tp->ops)) {
176 err = PTR_ERR(tp->ops);
Jiri Pirkod68d75f2018-05-11 17:45:32 +0200177 goto errout;
Jiri Pirko33a48922017-02-09 14:38:57 +0100178 }
179 tp->classify = tp->ops->classify;
180 tp->protocol = protocol;
181 tp->prio = prio;
Jiri Pirko5bc17012017-05-17 11:08:01 +0200182 tp->chain = chain;
Jiri Pirko33a48922017-02-09 14:38:57 +0100183
184 err = tp->ops->init(tp);
185 if (err) {
186 module_put(tp->ops->owner);
187 goto errout;
188 }
189 return tp;
190
191errout:
192 kfree(tp);
193 return ERR_PTR(err);
194}
195
Jakub Kicinski715df5e2018-01-24 12:54:13 -0800196static void tcf_proto_destroy(struct tcf_proto *tp,
197 struct netlink_ext_ack *extack)
Jiri Pirkocf1facd2017-02-09 14:38:56 +0100198{
Jakub Kicinski715df5e2018-01-24 12:54:13 -0800199 tp->ops->destroy(tp, extack);
WANG Cong763dbf62017-04-19 14:21:21 -0700200 module_put(tp->ops->owner);
201 kfree_rcu(tp, rcu);
Jiri Pirkocf1facd2017-02-09 14:38:56 +0100202}
203
Vlad Buslovc266f642019-02-11 10:55:32 +0200204#define ASSERT_BLOCK_LOCKED(block) \
205 lockdep_assert_held(&(block)->lock)
206
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100207struct tcf_filter_chain_list_item {
208 struct list_head list;
209 tcf_chain_head_change_t *chain_head_change;
210 void *chain_head_change_priv;
211};
212
Jiri Pirko5bc17012017-05-17 11:08:01 +0200213static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
214 u32 chain_index)
Jiri Pirko2190d1d2017-05-17 11:07:59 +0200215{
Jiri Pirko5bc17012017-05-17 11:08:01 +0200216 struct tcf_chain *chain;
217
Vlad Buslovc266f642019-02-11 10:55:32 +0200218 ASSERT_BLOCK_LOCKED(block);
219
Jiri Pirko5bc17012017-05-17 11:08:01 +0200220 chain = kzalloc(sizeof(*chain), GFP_KERNEL);
221 if (!chain)
222 return NULL;
223 list_add_tail(&chain->list, &block->chain_list);
224 chain->block = block;
225 chain->index = chain_index;
Cong Wange2ef7542017-09-11 16:33:31 -0700226 chain->refcnt = 1;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200227 if (!chain->index)
228 block->chain0.chain = chain;
Jiri Pirko5bc17012017-05-17 11:08:01 +0200229 return chain;
Jiri Pirko2190d1d2017-05-17 11:07:59 +0200230}
231
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100232static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
233 struct tcf_proto *tp_head)
234{
235 if (item->chain_head_change)
236 item->chain_head_change(tp_head, item->chain_head_change_priv);
237}
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200238
239static void tcf_chain0_head_change(struct tcf_chain *chain,
240 struct tcf_proto *tp_head)
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +0100241{
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100242 struct tcf_filter_chain_list_item *item;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200243 struct tcf_block *block = chain->block;
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100244
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200245 if (chain->index)
246 return;
247 list_for_each_entry(item, &block->chain0.filter_chain_list, list)
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100248 tcf_chain_head_change_item(item, tp_head);
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +0100249}
250
Vlad Buslovc266f642019-02-11 10:55:32 +0200251/* Returns true if block can be safely freed. */
252
253static bool tcf_chain_detach(struct tcf_chain *chain)
Jiri Pirkof93e1cd2017-05-20 15:01:32 +0200254{
Cong Wangefbf7892017-12-04 10:48:18 -0800255 struct tcf_block *block = chain->block;
256
Vlad Buslovc266f642019-02-11 10:55:32 +0200257 ASSERT_BLOCK_LOCKED(block);
258
Cong Wange2ef7542017-09-11 16:33:31 -0700259 list_del(&chain->list);
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200260 if (!chain->index)
261 block->chain0.chain = NULL;
Vlad Buslovc266f642019-02-11 10:55:32 +0200262
263 if (list_empty(&block->chain_list) &&
264 refcount_read(&block->refcnt) == 0)
265 return true;
266
267 return false;
268}
269
270static void tcf_block_destroy(struct tcf_block *block)
271{
272 mutex_destroy(&block->lock);
273 kfree_rcu(block, rcu);
274}
275
276static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
277{
278 struct tcf_block *block = chain->block;
279
Cong Wange2ef7542017-09-11 16:33:31 -0700280 kfree(chain);
Vlad Buslovc266f642019-02-11 10:55:32 +0200281 if (free_block)
282 tcf_block_destroy(block);
Cong Wange2ef7542017-09-11 16:33:31 -0700283}
Jiri Pirko744a4cf2017-08-22 22:46:49 +0200284
Cong Wange2ef7542017-09-11 16:33:31 -0700285static void tcf_chain_hold(struct tcf_chain *chain)
286{
Vlad Buslovc266f642019-02-11 10:55:32 +0200287 ASSERT_BLOCK_LOCKED(chain->block);
288
Cong Wange2ef7542017-09-11 16:33:31 -0700289 ++chain->refcnt;
Jiri Pirko2190d1d2017-05-17 11:07:59 +0200290}
291
Jiri Pirko3d32f4c2018-08-01 12:36:55 +0200292static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200293{
Vlad Buslovc266f642019-02-11 10:55:32 +0200294 ASSERT_BLOCK_LOCKED(chain->block);
295
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200296 /* In case all the references are action references, this
Jiri Pirko3d32f4c2018-08-01 12:36:55 +0200297 * chain should not be shown to the user.
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200298 */
299 return chain->refcnt == chain->action_refcnt;
300}
301
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200302static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
303 u32 chain_index)
Jiri Pirko5bc17012017-05-17 11:08:01 +0200304{
305 struct tcf_chain *chain;
306
Vlad Buslovc266f642019-02-11 10:55:32 +0200307 ASSERT_BLOCK_LOCKED(block);
308
Jiri Pirko5bc17012017-05-17 11:08:01 +0200309 list_for_each_entry(chain, &block->chain_list, list) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200310 if (chain->index == chain_index)
Cong Wange2ef7542017-09-11 16:33:31 -0700311 return chain;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200312 }
313 return NULL;
314}
315
316static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
317 u32 seq, u16 flags, int event, bool unicast);
318
Jiri Pirko53681402018-08-01 12:36:56 +0200319static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
320 u32 chain_index, bool create,
321 bool by_act)
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200322{
Vlad Buslovc266f642019-02-11 10:55:32 +0200323 struct tcf_chain *chain = NULL;
324 bool is_first_reference;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200325
Vlad Buslovc266f642019-02-11 10:55:32 +0200326 mutex_lock(&block->lock);
327 chain = tcf_chain_lookup(block, chain_index);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200328 if (chain) {
329 tcf_chain_hold(chain);
Jiri Pirko53681402018-08-01 12:36:56 +0200330 } else {
331 if (!create)
Vlad Buslovc266f642019-02-11 10:55:32 +0200332 goto errout;
Jiri Pirko53681402018-08-01 12:36:56 +0200333 chain = tcf_chain_create(block, chain_index);
334 if (!chain)
Vlad Buslovc266f642019-02-11 10:55:32 +0200335 goto errout;
Jiri Pirko5bc17012017-05-17 11:08:01 +0200336 }
Jiri Pirko80532382017-09-06 13:14:19 +0200337
Jiri Pirko53681402018-08-01 12:36:56 +0200338 if (by_act)
339 ++chain->action_refcnt;
Vlad Buslovc266f642019-02-11 10:55:32 +0200340 is_first_reference = chain->refcnt - chain->action_refcnt == 1;
341 mutex_unlock(&block->lock);
Jiri Pirko53681402018-08-01 12:36:56 +0200342
343 /* Send notification only in case we got the first
344 * non-action reference. Until then, the chain acts only as
345 * a placeholder for actions pointing to it and user ought
346 * not know about them.
347 */
Vlad Buslovc266f642019-02-11 10:55:32 +0200348 if (is_first_reference && !by_act)
Jiri Pirko53681402018-08-01 12:36:56 +0200349 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
350 RTM_NEWCHAIN, false);
351
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200352 return chain;
Vlad Buslovc266f642019-02-11 10:55:32 +0200353
354errout:
355 mutex_unlock(&block->lock);
356 return chain;
Jiri Pirko5bc17012017-05-17 11:08:01 +0200357}
Jiri Pirko53681402018-08-01 12:36:56 +0200358
Jiri Pirko290b1c82018-08-01 12:36:57 +0200359static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
360 bool create)
Jiri Pirko53681402018-08-01 12:36:56 +0200361{
362 return __tcf_chain_get(block, chain_index, create, false);
363}
Jiri Pirko5bc17012017-05-17 11:08:01 +0200364
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200365struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
366{
Jiri Pirko53681402018-08-01 12:36:56 +0200367 return __tcf_chain_get(block, chain_index, true, true);
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200368}
369EXPORT_SYMBOL(tcf_chain_get_by_act);
370
Jiri Pirko9f407f12018-07-23 09:23:07 +0200371static void tc_chain_tmplt_del(struct tcf_chain *chain);
372
Vlad Buslov91052fa2019-02-11 10:55:33 +0200373static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
374 bool explicitly_created)
Jiri Pirko5bc17012017-05-17 11:08:01 +0200375{
Vlad Buslovc266f642019-02-11 10:55:32 +0200376 struct tcf_block *block = chain->block;
377 bool is_last, free_block = false;
378 unsigned int refcnt;
379
380 mutex_lock(&block->lock);
Vlad Buslov91052fa2019-02-11 10:55:33 +0200381 if (explicitly_created) {
382 if (!chain->explicitly_created) {
383 mutex_unlock(&block->lock);
384 return;
385 }
386 chain->explicitly_created = false;
387 }
388
Jiri Pirko53681402018-08-01 12:36:56 +0200389 if (by_act)
390 chain->action_refcnt--;
Vlad Buslovc266f642019-02-11 10:55:32 +0200391
392 /* tc_chain_notify_delete can't be called while holding block lock.
393 * However, when block is unlocked chain can be changed concurrently, so
394 * save these to temporary variables.
395 */
396 refcnt = --chain->refcnt;
397 is_last = refcnt - chain->action_refcnt == 0;
398 if (refcnt == 0)
399 free_block = tcf_chain_detach(chain);
400 mutex_unlock(&block->lock);
Jiri Pirko53681402018-08-01 12:36:56 +0200401
402 /* The last dropped non-action reference will trigger notification. */
Vlad Buslovc266f642019-02-11 10:55:32 +0200403 if (is_last && !by_act)
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200404 tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false);
Jiri Pirko53681402018-08-01 12:36:56 +0200405
Vlad Buslovc266f642019-02-11 10:55:32 +0200406 if (refcnt == 0) {
Jiri Pirko9f407f12018-07-23 09:23:07 +0200407 tc_chain_tmplt_del(chain);
Vlad Buslovc266f642019-02-11 10:55:32 +0200408 tcf_chain_destroy(chain, free_block);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200409 }
Jiri Pirko5bc17012017-05-17 11:08:01 +0200410}
Jiri Pirko53681402018-08-01 12:36:56 +0200411
Jiri Pirko290b1c82018-08-01 12:36:57 +0200412static void tcf_chain_put(struct tcf_chain *chain)
Jiri Pirko53681402018-08-01 12:36:56 +0200413{
Vlad Buslov91052fa2019-02-11 10:55:33 +0200414 __tcf_chain_put(chain, false, false);
Jiri Pirko53681402018-08-01 12:36:56 +0200415}
Jiri Pirko5bc17012017-05-17 11:08:01 +0200416
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200417void tcf_chain_put_by_act(struct tcf_chain *chain)
418{
Vlad Buslov91052fa2019-02-11 10:55:33 +0200419 __tcf_chain_put(chain, true, false);
Jiri Pirko1f3ed382018-07-27 09:45:05 +0200420}
421EXPORT_SYMBOL(tcf_chain_put_by_act);
422
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200423static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
424{
Vlad Buslov91052fa2019-02-11 10:55:33 +0200425 __tcf_chain_put(chain, false, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +0200426}
427
Jiri Pirko290b1c82018-08-01 12:36:57 +0200428static void tcf_chain_flush(struct tcf_chain *chain)
429{
430 struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
431
432 tcf_chain0_head_change(chain, NULL);
433 while (tp) {
434 RCU_INIT_POINTER(chain->filter_chain, tp->next);
435 tcf_proto_destroy(tp, NULL);
436 tp = rtnl_dereference(chain->filter_chain);
437 tcf_chain_put(chain);
438 }
439}
440
John Hurley7f76fa32018-11-09 21:21:26 -0800441static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
442{
443 const struct Qdisc_class_ops *cops;
444 struct Qdisc *qdisc;
445
446 if (!dev_ingress_queue(dev))
447 return NULL;
448
449 qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
450 if (!qdisc)
451 return NULL;
452
453 cops = qdisc->ops->cl_ops;
454 if (!cops)
455 return NULL;
456
457 if (!cops->tcf_block)
458 return NULL;
459
460 return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
461}
462
463static struct rhashtable indr_setup_block_ht;
464
465struct tc_indr_block_dev {
466 struct rhash_head ht_node;
467 struct net_device *dev;
468 unsigned int refcnt;
469 struct list_head cb_list;
470 struct tcf_block *block;
471};
472
473struct tc_indr_block_cb {
474 struct list_head list;
475 void *cb_priv;
476 tc_indr_block_bind_cb_t *cb;
477 void *cb_ident;
478};
479
480static const struct rhashtable_params tc_indr_setup_block_ht_params = {
481 .key_offset = offsetof(struct tc_indr_block_dev, dev),
482 .head_offset = offsetof(struct tc_indr_block_dev, ht_node),
483 .key_len = sizeof(struct net_device *),
484};
485
486static struct tc_indr_block_dev *
487tc_indr_block_dev_lookup(struct net_device *dev)
488{
489 return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
490 tc_indr_setup_block_ht_params);
491}
492
493static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
494{
495 struct tc_indr_block_dev *indr_dev;
496
497 indr_dev = tc_indr_block_dev_lookup(dev);
498 if (indr_dev)
499 goto inc_ref;
500
501 indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
502 if (!indr_dev)
503 return NULL;
504
505 INIT_LIST_HEAD(&indr_dev->cb_list);
506 indr_dev->dev = dev;
507 indr_dev->block = tc_dev_ingress_block(dev);
508 if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
509 tc_indr_setup_block_ht_params)) {
510 kfree(indr_dev);
511 return NULL;
512 }
513
514inc_ref:
515 indr_dev->refcnt++;
516 return indr_dev;
517}
518
519static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
520{
521 if (--indr_dev->refcnt)
522 return;
523
524 rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
525 tc_indr_setup_block_ht_params);
526 kfree(indr_dev);
527}
528
529static struct tc_indr_block_cb *
530tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
531 tc_indr_block_bind_cb_t *cb, void *cb_ident)
532{
533 struct tc_indr_block_cb *indr_block_cb;
534
535 list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
536 if (indr_block_cb->cb == cb &&
537 indr_block_cb->cb_ident == cb_ident)
538 return indr_block_cb;
539 return NULL;
540}
541
542static struct tc_indr_block_cb *
543tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
544 tc_indr_block_bind_cb_t *cb, void *cb_ident)
545{
546 struct tc_indr_block_cb *indr_block_cb;
547
548 indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
549 if (indr_block_cb)
550 return ERR_PTR(-EEXIST);
551
552 indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
553 if (!indr_block_cb)
554 return ERR_PTR(-ENOMEM);
555
556 indr_block_cb->cb_priv = cb_priv;
557 indr_block_cb->cb = cb;
558 indr_block_cb->cb_ident = cb_ident;
559 list_add(&indr_block_cb->list, &indr_dev->cb_list);
560
561 return indr_block_cb;
562}
563
564static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
565{
566 list_del(&indr_block_cb->list);
567 kfree(indr_block_cb);
568}
569
570static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
571 struct tc_indr_block_cb *indr_block_cb,
572 enum tc_block_command command)
573{
574 struct tc_block_offload bo = {
575 .command = command,
576 .binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
577 .block = indr_dev->block,
578 };
579
580 if (!indr_dev->block)
581 return;
582
583 indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
584 &bo);
585}
586
587int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
588 tc_indr_block_bind_cb_t *cb, void *cb_ident)
589{
590 struct tc_indr_block_cb *indr_block_cb;
591 struct tc_indr_block_dev *indr_dev;
592 int err;
593
594 indr_dev = tc_indr_block_dev_get(dev);
595 if (!indr_dev)
596 return -ENOMEM;
597
598 indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
599 err = PTR_ERR_OR_ZERO(indr_block_cb);
600 if (err)
601 goto err_dev_put;
602
603 tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
604 return 0;
605
606err_dev_put:
607 tc_indr_block_dev_put(indr_dev);
608 return err;
609}
610EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
611
612int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
613 tc_indr_block_bind_cb_t *cb, void *cb_ident)
614{
615 int err;
616
617 rtnl_lock();
618 err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
619 rtnl_unlock();
620
621 return err;
622}
623EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
624
625void __tc_indr_block_cb_unregister(struct net_device *dev,
626 tc_indr_block_bind_cb_t *cb, void *cb_ident)
627{
628 struct tc_indr_block_cb *indr_block_cb;
629 struct tc_indr_block_dev *indr_dev;
630
631 indr_dev = tc_indr_block_dev_lookup(dev);
632 if (!indr_dev)
633 return;
634
635 indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
636 if (!indr_block_cb)
637 return;
638
639 /* Send unbind message if required to free any block cbs. */
640 tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
641 tc_indr_block_cb_del(indr_block_cb);
642 tc_indr_block_dev_put(indr_dev);
643}
644EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
645
646void tc_indr_block_cb_unregister(struct net_device *dev,
647 tc_indr_block_bind_cb_t *cb, void *cb_ident)
648{
649 rtnl_lock();
650 __tc_indr_block_cb_unregister(dev, cb, cb_ident);
651 rtnl_unlock();
652}
653EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
654
655static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
656 struct tcf_block_ext_info *ei,
657 enum tc_block_command command,
658 struct netlink_ext_ack *extack)
659{
660 struct tc_indr_block_cb *indr_block_cb;
661 struct tc_indr_block_dev *indr_dev;
662 struct tc_block_offload bo = {
663 .command = command,
664 .binder_type = ei->binder_type,
665 .block = block,
666 .extack = extack,
667 };
668
669 indr_dev = tc_indr_block_dev_lookup(dev);
670 if (!indr_dev)
671 return;
672
673 indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
674
675 list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
676 indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
677 &bo);
678}
679
Jiri Pirkocaa72602018-01-17 11:46:50 +0100680static bool tcf_block_offload_in_use(struct tcf_block *block)
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200681{
Jiri Pirkocaa72602018-01-17 11:46:50 +0100682 return block->offloadcnt;
683}
684
685static int tcf_block_offload_cmd(struct tcf_block *block,
686 struct net_device *dev,
687 struct tcf_block_ext_info *ei,
John Hurley60513bd2018-06-25 14:30:04 -0700688 enum tc_block_command command,
689 struct netlink_ext_ack *extack)
Jiri Pirkocaa72602018-01-17 11:46:50 +0100690{
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200691 struct tc_block_offload bo = {};
692
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200693 bo.command = command;
694 bo.binder_type = ei->binder_type;
695 bo.block = block;
John Hurley60513bd2018-06-25 14:30:04 -0700696 bo.extack = extack;
Jiri Pirkocaa72602018-01-17 11:46:50 +0100697 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200698}
699
Jiri Pirkocaa72602018-01-17 11:46:50 +0100700static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
John Hurley60513bd2018-06-25 14:30:04 -0700701 struct tcf_block_ext_info *ei,
702 struct netlink_ext_ack *extack)
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200703{
Jiri Pirkocaa72602018-01-17 11:46:50 +0100704 struct net_device *dev = q->dev_queue->dev;
705 int err;
706
707 if (!dev->netdev_ops->ndo_setup_tc)
708 goto no_offload_dev_inc;
709
710 /* If tc offload feature is disabled and the block we try to bind
711 * to already has some offloaded filters, forbid to bind.
712 */
John Hurley60513bd2018-06-25 14:30:04 -0700713 if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
714 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
Jiri Pirkocaa72602018-01-17 11:46:50 +0100715 return -EOPNOTSUPP;
John Hurley60513bd2018-06-25 14:30:04 -0700716 }
Jiri Pirkocaa72602018-01-17 11:46:50 +0100717
John Hurley60513bd2018-06-25 14:30:04 -0700718 err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
Jiri Pirkocaa72602018-01-17 11:46:50 +0100719 if (err == -EOPNOTSUPP)
720 goto no_offload_dev_inc;
John Hurley7f76fa32018-11-09 21:21:26 -0800721 if (err)
722 return err;
723
724 tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
725 return 0;
Jiri Pirkocaa72602018-01-17 11:46:50 +0100726
727no_offload_dev_inc:
728 if (tcf_block_offload_in_use(block))
729 return -EOPNOTSUPP;
730 block->nooffloaddevcnt++;
John Hurley7f76fa32018-11-09 21:21:26 -0800731 tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
Jiri Pirkocaa72602018-01-17 11:46:50 +0100732 return 0;
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200733}
734
735static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
736 struct tcf_block_ext_info *ei)
737{
Jiri Pirkocaa72602018-01-17 11:46:50 +0100738 struct net_device *dev = q->dev_queue->dev;
739 int err;
740
John Hurley7f76fa32018-11-09 21:21:26 -0800741 tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
742
Jiri Pirkocaa72602018-01-17 11:46:50 +0100743 if (!dev->netdev_ops->ndo_setup_tc)
744 goto no_offload_dev_dec;
John Hurley60513bd2018-06-25 14:30:04 -0700745 err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
Jiri Pirkocaa72602018-01-17 11:46:50 +0100746 if (err == -EOPNOTSUPP)
747 goto no_offload_dev_dec;
748 return;
749
750no_offload_dev_dec:
751 WARN_ON(block->nooffloaddevcnt-- == 0);
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200752}
753
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100754static int
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200755tcf_chain0_head_change_cb_add(struct tcf_block *block,
756 struct tcf_block_ext_info *ei,
757 struct netlink_ext_ack *extack)
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100758{
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200759 struct tcf_chain *chain0 = block->chain0.chain;
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100760 struct tcf_filter_chain_list_item *item;
761
762 item = kmalloc(sizeof(*item), GFP_KERNEL);
763 if (!item) {
764 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
765 return -ENOMEM;
766 }
767 item->chain_head_change = ei->chain_head_change;
768 item->chain_head_change_priv = ei->chain_head_change_priv;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200769 if (chain0 && chain0->filter_chain)
770 tcf_chain_head_change_item(item, chain0->filter_chain);
771 list_add(&item->list, &block->chain0.filter_chain_list);
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100772 return 0;
773}
774
775static void
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200776tcf_chain0_head_change_cb_del(struct tcf_block *block,
777 struct tcf_block_ext_info *ei)
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100778{
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200779 struct tcf_chain *chain0 = block->chain0.chain;
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100780 struct tcf_filter_chain_list_item *item;
781
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200782 list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100783 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
784 (item->chain_head_change == ei->chain_head_change &&
785 item->chain_head_change_priv == ei->chain_head_change_priv)) {
Jiri Pirkof71e0ca42018-07-23 09:23:05 +0200786 if (chain0)
787 tcf_chain_head_change_item(item, NULL);
Jiri Pirkoa9b19442018-01-17 11:46:45 +0100788 list_del(&item->list);
789 kfree(item);
790 return;
791 }
792 }
793 WARN_ON(1);
794}
795
Jiri Pirko48617382018-01-17 11:46:46 +0100796struct tcf_net {
Vlad Buslovab281622018-09-24 19:22:56 +0300797 spinlock_t idr_lock; /* Protects idr */
Jiri Pirko48617382018-01-17 11:46:46 +0100798 struct idr idr;
799};
800
801static unsigned int tcf_net_id;
802
/* Register a shared block in the per-netns IDR under block->index.
 * idr_preload()/GFP_NOWAIT is used because the actual allocation runs
 * under the tn->idr_lock spinlock, where GFP_KERNEL cannot be used.
 * Returns 0 on success or a negative errno from idr_alloc_u32()
 * (e.g. if the index is already taken).
 */
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}
818
Jiri Pirko48617382018-01-17 11:46:46 +0100819static void tcf_block_remove(struct tcf_block *block, struct net *net)
Jiri Pirko6529eab2017-05-17 11:07:55 +0200820{
Jiri Pirko48617382018-01-17 11:46:46 +0100821 struct tcf_net *tn = net_generic(net, tcf_net_id);
822
Vlad Buslovab281622018-09-24 19:22:56 +0300823 spin_lock(&tn->idr_lock);
Matthew Wilcox9c160942017-11-28 09:48:43 -0500824 idr_remove(&tn->idr, block->index);
Vlad Buslovab281622018-09-24 19:22:56 +0300825 spin_unlock(&tn->idr_lock);
Jiri Pirko48617382018-01-17 11:46:46 +0100826}
827
/* Allocate and initialize a new tcf_block with refcount 1 and empty
 * chain/callback/owner lists.  @block_index is stored as the block's
 * index; the qdisc pointer is recorded only for non-shared blocks.
 * Returns the new block or ERR_PTR(-ENOMEM), setting extack on failure.
 */
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}
854
/* Look up a shared block by index in the per-netns IDR.  Returns NULL
 * if no block is registered under @block_index.  Takes no reference;
 * use tcf_block_refcnt_get() when a reference is needed.
 */
static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
861
Vlad Buslov0607e432018-09-24 19:22:57 +0300862static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
863{
864 struct tcf_block *block;
865
866 rcu_read_lock();
867 block = tcf_block_lookup(net, block_index);
868 if (block && !refcount_inc_not_zero(&block->refcnt))
869 block = NULL;
870 rcu_read_unlock();
871
872 return block;
873}
874
Vlad Buslovf0023432018-09-24 19:22:55 +0300875static void tcf_block_flush_all_chains(struct tcf_block *block)
876{
877 struct tcf_chain *chain;
878
879 /* Hold a refcnt for all chains, so that they don't disappear
880 * while we are iterating.
881 */
882 list_for_each_entry(chain, &block->chain_list, list)
883 tcf_chain_hold(chain);
884
885 list_for_each_entry(chain, &block->chain_list, list)
886 tcf_chain_flush(chain);
887}
888
/* Drop the references taken by tcf_block_flush_all_chains(), plus the
 * implicit reference held by explicitly-created chains.  Uses the _safe
 * iterator because dropping the last reference unlinks and frees the
 * chain entry.
 */
static void tcf_block_put_all_chains(struct tcf_block *block)
{
	struct tcf_chain *chain, *tmp;

	/* At this point, all the chains should have refcnt >= 1. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_put(chain);
	}
}
899
Vlad Buslov0607e432018-09-24 19:22:57 +0300900static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
901 struct tcf_block_ext_info *ei)
902{
Vlad Buslovc266f642019-02-11 10:55:32 +0200903 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
Vlad Buslov0607e432018-09-24 19:22:57 +0300904 /* Flushing/putting all chains will cause the block to be
905 * deallocated when last chain is freed. However, if chain_list
906 * is empty, block has to be manually deallocated. After block
907 * reference counter reached 0, it is no longer possible to
908 * increment it or add new chains to block.
909 */
910 bool free_block = list_empty(&block->chain_list);
911
Vlad Buslovc266f642019-02-11 10:55:32 +0200912 mutex_unlock(&block->lock);
Vlad Buslov0607e432018-09-24 19:22:57 +0300913 if (tcf_block_shared(block))
914 tcf_block_remove(block, block->net);
915 if (!free_block)
916 tcf_block_flush_all_chains(block);
917
918 if (q)
919 tcf_block_offload_unbind(block, q, ei);
920
921 if (free_block)
Vlad Buslovc266f642019-02-11 10:55:32 +0200922 tcf_block_destroy(block);
Vlad Buslov0607e432018-09-24 19:22:57 +0300923 else
924 tcf_block_put_all_chains(block);
925 } else if (q) {
926 tcf_block_offload_unbind(block, q, ei);
927 }
928}
929
/* Drop a reference taken with tcf_block_refcnt_get() (no qdisc/ext-info
 * to unbind, hence the NULL arguments).
 */
static void tcf_block_refcnt_put(struct tcf_block *block)
{
	__tcf_block_put(block, NULL, NULL);
}
934
Vlad Buslovc431f892018-05-31 09:52:53 +0300935/* Find tcf block.
936 * Set q, parent, cl when appropriate.
937 */
938
939static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
940 u32 *parent, unsigned long *cl,
941 int ifindex, u32 block_index,
942 struct netlink_ext_ack *extack)
943{
944 struct tcf_block *block;
Vlad Buslove368fdb2018-09-24 19:22:53 +0300945 int err = 0;
Vlad Buslovc431f892018-05-31 09:52:53 +0300946
947 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +0300948 block = tcf_block_refcnt_get(net, block_index);
Vlad Buslovc431f892018-05-31 09:52:53 +0300949 if (!block) {
950 NL_SET_ERR_MSG(extack, "Block of given index was not found");
951 return ERR_PTR(-EINVAL);
952 }
953 } else {
954 const struct Qdisc_class_ops *cops;
955 struct net_device *dev;
956
Vlad Buslove368fdb2018-09-24 19:22:53 +0300957 rcu_read_lock();
958
Vlad Buslovc431f892018-05-31 09:52:53 +0300959 /* Find link */
Vlad Buslove368fdb2018-09-24 19:22:53 +0300960 dev = dev_get_by_index_rcu(net, ifindex);
961 if (!dev) {
962 rcu_read_unlock();
Vlad Buslovc431f892018-05-31 09:52:53 +0300963 return ERR_PTR(-ENODEV);
Vlad Buslove368fdb2018-09-24 19:22:53 +0300964 }
Vlad Buslovc431f892018-05-31 09:52:53 +0300965
966 /* Find qdisc */
967 if (!*parent) {
968 *q = dev->qdisc;
969 *parent = (*q)->handle;
970 } else {
Vlad Buslove368fdb2018-09-24 19:22:53 +0300971 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
Vlad Buslovc431f892018-05-31 09:52:53 +0300972 if (!*q) {
973 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
Vlad Buslove368fdb2018-09-24 19:22:53 +0300974 err = -EINVAL;
975 goto errout_rcu;
Vlad Buslovc431f892018-05-31 09:52:53 +0300976 }
977 }
978
Vlad Buslove368fdb2018-09-24 19:22:53 +0300979 *q = qdisc_refcount_inc_nz(*q);
980 if (!*q) {
981 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
982 err = -EINVAL;
983 goto errout_rcu;
984 }
985
Vlad Buslovc431f892018-05-31 09:52:53 +0300986 /* Is it classful? */
987 cops = (*q)->ops->cl_ops;
988 if (!cops) {
989 NL_SET_ERR_MSG(extack, "Qdisc not classful");
Vlad Buslove368fdb2018-09-24 19:22:53 +0300990 err = -EINVAL;
991 goto errout_rcu;
Vlad Buslovc431f892018-05-31 09:52:53 +0300992 }
993
994 if (!cops->tcf_block) {
995 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
Vlad Buslove368fdb2018-09-24 19:22:53 +0300996 err = -EOPNOTSUPP;
997 goto errout_rcu;
Vlad Buslovc431f892018-05-31 09:52:53 +0300998 }
999
Vlad Buslove368fdb2018-09-24 19:22:53 +03001000 /* At this point we know that qdisc is not noop_qdisc,
1001 * which means that qdisc holds a reference to net_device
1002 * and we hold a reference to qdisc, so it is safe to release
1003 * rcu read lock.
1004 */
1005 rcu_read_unlock();
1006
Vlad Buslovc431f892018-05-31 09:52:53 +03001007 /* Do we search for filter, attached to class? */
1008 if (TC_H_MIN(*parent)) {
1009 *cl = cops->find(*q, *parent);
1010 if (*cl == 0) {
1011 NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
Vlad Buslove368fdb2018-09-24 19:22:53 +03001012 err = -ENOENT;
1013 goto errout_qdisc;
Vlad Buslovc431f892018-05-31 09:52:53 +03001014 }
1015 }
1016
1017 /* And the last stroke */
1018 block = cops->tcf_block(*q, *cl, extack);
Vlad Buslove368fdb2018-09-24 19:22:53 +03001019 if (!block) {
1020 err = -EINVAL;
1021 goto errout_qdisc;
1022 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001023 if (tcf_block_shared(block)) {
1024 NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
Vlad Buslove368fdb2018-09-24 19:22:53 +03001025 err = -EOPNOTSUPP;
1026 goto errout_qdisc;
Vlad Buslovc431f892018-05-31 09:52:53 +03001027 }
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001028
1029 /* Always take reference to block in order to support execution
1030 * of rules update path of cls API without rtnl lock. Caller
1031 * must release block when it is finished using it. 'if' block
1032 * of this conditional obtain reference to block by calling
1033 * tcf_block_refcnt_get().
1034 */
1035 refcount_inc(&block->refcnt);
Vlad Buslovc431f892018-05-31 09:52:53 +03001036 }
1037
1038 return block;
Vlad Buslove368fdb2018-09-24 19:22:53 +03001039
1040errout_rcu:
1041 rcu_read_unlock();
1042errout_qdisc:
Cong Wang460b3602018-09-27 13:42:19 -07001043 if (*q) {
Vlad Buslove368fdb2018-09-24 19:22:53 +03001044 qdisc_put(*q);
Cong Wang460b3602018-09-27 13:42:19 -07001045 *q = NULL;
1046 }
Vlad Buslove368fdb2018-09-24 19:22:53 +03001047 return ERR_PTR(err);
1048}
1049
/* Release the block and qdisc references taken by tcf_block_find().
 * Tolerates a NULL/ERR_PTR block and a NULL qdisc, so it is safe to
 * call on any of tcf_block_find()'s partial-success outcomes.
 */
static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block);

	if (q)
		qdisc_put(q);
}
1058
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001059struct tcf_block_owner_item {
1060 struct list_head list;
1061 struct Qdisc *q;
1062 enum tcf_block_binder_type binder_type;
1063};
1064
1065static void
1066tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1067 struct Qdisc *q,
1068 enum tcf_block_binder_type binder_type)
1069{
1070 if (block->keep_dst &&
1071 binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1072 binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1073 netif_keep_dst(qdisc_dev(q));
1074}
1075
/* Mark the block as needing dst retention and propagate that to every
 * qdisc currently on the owner list (future owners are handled in
 * tcf_block_get_ext()).
 */
void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1086
/* Record (q, binder_type) as an owner of the block.  Returns 0 on
 * success or -ENOMEM if the list item cannot be allocated.
 */
static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}
1101
/* Remove the owner entry matching (q, binder_type) from the block's
 * owner list and free it.  WARN_ON(1) fires if no such entry exists,
 * which would indicate unbalanced add/del calls.
 */
static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
1117
Jiri Pirko48617382018-01-17 11:46:46 +01001118int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1119 struct tcf_block_ext_info *ei,
1120 struct netlink_ext_ack *extack)
1121{
1122 struct net *net = qdisc_net(q);
1123 struct tcf_block *block = NULL;
Jiri Pirko48617382018-01-17 11:46:46 +01001124 int err;
1125
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001126 if (ei->block_index)
Jiri Pirko48617382018-01-17 11:46:46 +01001127 /* block_index not 0 means the shared block is requested */
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001128 block = tcf_block_refcnt_get(net, ei->block_index);
Jiri Pirko48617382018-01-17 11:46:46 +01001129
1130 if (!block) {
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001131 block = tcf_block_create(net, q, ei->block_index, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001132 if (IS_ERR(block))
1133 return PTR_ERR(block);
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001134 if (tcf_block_shared(block)) {
1135 err = tcf_block_insert(block, net, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001136 if (err)
1137 goto err_block_insert;
1138 }
1139 }
1140
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001141 err = tcf_block_owner_add(block, q, ei->binder_type);
1142 if (err)
1143 goto err_block_owner_add;
1144
1145 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1146
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001147 err = tcf_chain0_head_change_cb_add(block, ei, extack);
Jiri Pirkoa9b19442018-01-17 11:46:45 +01001148 if (err)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001149 goto err_chain0_head_change_cb_add;
Jiri Pirkocaa72602018-01-17 11:46:50 +01001150
John Hurley60513bd2018-06-25 14:30:04 -07001151 err = tcf_block_offload_bind(block, q, ei, extack);
Jiri Pirkocaa72602018-01-17 11:46:50 +01001152 if (err)
1153 goto err_block_offload_bind;
1154
Jiri Pirko6529eab2017-05-17 11:07:55 +02001155 *p_block = block;
1156 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001157
Jiri Pirkocaa72602018-01-17 11:46:50 +01001158err_block_offload_bind:
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001159 tcf_chain0_head_change_cb_del(block, ei);
1160err_chain0_head_change_cb_add:
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001161 tcf_block_owner_del(block, q, ei->binder_type);
1162err_block_owner_add:
Jiri Pirko48617382018-01-17 11:46:46 +01001163err_block_insert:
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001164 tcf_block_refcnt_put(block);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001165 return err;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001166}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001167EXPORT_SYMBOL(tcf_block_get_ext);
1168
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001169static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1170{
1171 struct tcf_proto __rcu **p_filter_chain = priv;
1172
1173 rcu_assign_pointer(*p_filter_chain, tp_head);
1174}
1175
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001176int tcf_block_get(struct tcf_block **p_block,
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001177 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1178 struct netlink_ext_ack *extack)
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001179{
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001180 struct tcf_block_ext_info ei = {
1181 .chain_head_change = tcf_chain_head_change_dflt,
1182 .chain_head_change_priv = p_filter_chain,
1183 };
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001184
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001185 WARN_ON(!p_filter_chain);
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001186 return tcf_block_get_ext(p_block, q, &ei, extack);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001187}
Jiri Pirko6529eab2017-05-17 11:07:55 +02001188EXPORT_SYMBOL(tcf_block_get);
1189
Cong Wang7aa00452017-10-26 18:24:28 -07001190/* XXX: Standalone actions are not allowed to jump to any chain, and bound
Roman Kapla60b3f52017-11-24 12:27:58 +01001191 * actions should be all removed after flushing.
Cong Wang7aa00452017-10-26 18:24:28 -07001192 */
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001193void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
David S. Millere1ea2f92017-10-30 14:10:01 +09001194 struct tcf_block_ext_info *ei)
Cong Wang7aa00452017-10-26 18:24:28 -07001195{
David S. Millerc30abd52017-12-16 22:11:55 -05001196 if (!block)
1197 return;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001198 tcf_chain0_head_change_cb_del(block, ei);
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001199 tcf_block_owner_del(block, q, ei->binder_type);
Roman Kapla60b3f52017-11-24 12:27:58 +01001200
Vlad Buslov0607e432018-09-24 19:22:57 +03001201 __tcf_block_put(block, q, ei);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001202}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001203EXPORT_SYMBOL(tcf_block_put_ext);
1204
/* Counterpart of tcf_block_get(): detach using an empty ext_info and
 * the qdisc stored on the block.  NULL @block is a no-op.
 */
void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);
Jiri Pirkocf1facd2017-02-09 14:38:56 +01001215
Jiri Pirkoacb67442017-10-19 15:50:31 +02001216struct tcf_block_cb {
1217 struct list_head list;
1218 tc_setup_cb_t *cb;
1219 void *cb_ident;
1220 void *cb_priv;
1221 unsigned int refcnt;
1222};
1223
/* Accessor for the private data supplied when the callback was
 * registered.
 */
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);
1229
1230struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
1231 tc_setup_cb_t *cb, void *cb_ident)
1232{ struct tcf_block_cb *block_cb;
1233
1234 list_for_each_entry(block_cb, &block->cb_list, list)
1235 if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
1236 return block_cb;
1237 return NULL;
1238}
1239EXPORT_SYMBOL(tcf_block_cb_lookup);
1240
/* Bump the callback entry's reference count (plain counter; callers
 * provide their own serialization).
 */
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);
1246
/* Decrement the callback entry's reference count and return the new
 * value, so callers can free the entry when it reaches zero.
 */
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);
1252
John Hurley32636742018-06-25 14:30:10 -07001253static int
1254tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
1255 void *cb_priv, bool add, bool offload_in_use,
1256 struct netlink_ext_ack *extack)
1257{
1258 struct tcf_chain *chain;
1259 struct tcf_proto *tp;
1260 int err;
1261
1262 list_for_each_entry(chain, &block->chain_list, list) {
1263 for (tp = rtnl_dereference(chain->filter_chain); tp;
1264 tp = rtnl_dereference(tp->next)) {
1265 if (tp->ops->reoffload) {
1266 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1267 extack);
1268 if (err && add)
1269 goto err_playback_remove;
1270 } else if (add && offload_in_use) {
1271 err = -EOPNOTSUPP;
1272 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1273 goto err_playback_remove;
1274 }
1275 }
1276 }
1277
1278 return 0;
1279
1280err_playback_remove:
1281 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1282 extack);
1283 return err;
1284}
1285
Jiri Pirkoacb67442017-10-19 15:50:31 +02001286struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
1287 tc_setup_cb_t *cb, void *cb_ident,
John Hurley60513bd2018-06-25 14:30:04 -07001288 void *cb_priv,
1289 struct netlink_ext_ack *extack)
Jiri Pirkoacb67442017-10-19 15:50:31 +02001290{
1291 struct tcf_block_cb *block_cb;
John Hurley32636742018-06-25 14:30:10 -07001292 int err;
Jiri Pirkoacb67442017-10-19 15:50:31 +02001293
John Hurley32636742018-06-25 14:30:10 -07001294 /* Replay any already present rules */
1295 err = tcf_block_playback_offloads(block, cb, cb_priv, true,
1296 tcf_block_offload_in_use(block),
1297 extack);
1298 if (err)
1299 return ERR_PTR(err);
Jiri Pirkocaa72602018-01-17 11:46:50 +01001300
Jiri Pirkoacb67442017-10-19 15:50:31 +02001301 block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
1302 if (!block_cb)
Jiri Pirkocaa72602018-01-17 11:46:50 +01001303 return ERR_PTR(-ENOMEM);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001304 block_cb->cb = cb;
1305 block_cb->cb_ident = cb_ident;
1306 block_cb->cb_priv = cb_priv;
1307 list_add(&block_cb->list, &block->cb_list);
1308 return block_cb;
1309}
1310EXPORT_SYMBOL(__tcf_block_cb_register);
1311
1312int tcf_block_cb_register(struct tcf_block *block,
1313 tc_setup_cb_t *cb, void *cb_ident,
John Hurley60513bd2018-06-25 14:30:04 -07001314 void *cb_priv, struct netlink_ext_ack *extack)
Jiri Pirkoacb67442017-10-19 15:50:31 +02001315{
1316 struct tcf_block_cb *block_cb;
1317
John Hurley60513bd2018-06-25 14:30:04 -07001318 block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
1319 extack);
Gustavo A. R. Silvabaa2d2b2018-07-18 23:14:17 -05001320 return PTR_ERR_OR_ZERO(block_cb);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001321}
1322EXPORT_SYMBOL(tcf_block_cb_register);
1323
John Hurley32636742018-06-25 14:30:10 -07001324void __tcf_block_cb_unregister(struct tcf_block *block,
1325 struct tcf_block_cb *block_cb)
Jiri Pirkoacb67442017-10-19 15:50:31 +02001326{
John Hurley32636742018-06-25 14:30:10 -07001327 tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
1328 false, tcf_block_offload_in_use(block),
1329 NULL);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001330 list_del(&block_cb->list);
1331 kfree(block_cb);
1332}
1333EXPORT_SYMBOL(__tcf_block_cb_unregister);
1334
/* Convenience form of __tcf_block_cb_unregister() that looks the entry
 * up by its (cb, cb_ident) pair first; silently returns if no matching
 * registration exists.
 */
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block, block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);
1346
Jiri Pirko87d83092017-05-17 11:07:54 +02001347/* Main classifier routine: scans classifier chain attached
1348 * to this qdisc, (optionally) tests for protocol and asks
1349 * specific classifiers.
1350 */
1351int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1352 struct tcf_result *res, bool compat_mode)
1353{
Jiri Pirko87d83092017-05-17 11:07:54 +02001354#ifdef CONFIG_NET_CLS_ACT
1355 const int max_reclassify_loop = 4;
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001356 const struct tcf_proto *orig_tp = tp;
1357 const struct tcf_proto *first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001358 int limit = 0;
1359
1360reclassify:
1361#endif
1362 for (; tp; tp = rcu_dereference_bh(tp->next)) {
Cong Wangcd0c4e72019-01-11 18:55:42 -08001363 __be16 protocol = tc_skb_protocol(skb);
Jiri Pirko87d83092017-05-17 11:07:54 +02001364 int err;
1365
1366 if (tp->protocol != protocol &&
1367 tp->protocol != htons(ETH_P_ALL))
1368 continue;
1369
1370 err = tp->classify(skb, tp, res);
1371#ifdef CONFIG_NET_CLS_ACT
Jiri Pirkodb505142017-05-17 11:08:03 +02001372 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001373 first_tp = orig_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001374 goto reset;
Jiri Pirkodb505142017-05-17 11:08:03 +02001375 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001376 first_tp = res->goto_tp;
Jiri Pirkodb505142017-05-17 11:08:03 +02001377 goto reset;
1378 }
Jiri Pirko87d83092017-05-17 11:07:54 +02001379#endif
1380 if (err >= 0)
1381 return err;
1382 }
1383
1384 return TC_ACT_UNSPEC; /* signal: continue lookup */
1385#ifdef CONFIG_NET_CLS_ACT
1386reset:
1387 if (unlikely(limit++ >= max_reclassify_loop)) {
Jiri Pirko9d3aaff2018-01-17 11:46:47 +01001388 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1389 tp->chain->block->index,
1390 tp->prio & 0xffff,
Jiri Pirko87d83092017-05-17 11:07:54 +02001391 ntohs(tp->protocol));
1392 return TC_ACT_SHOT;
1393 }
1394
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001395 tp = first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001396 goto reclassify;
1397#endif
1398}
1399EXPORT_SYMBOL(tcf_classify);
1400
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001401struct tcf_chain_info {
1402 struct tcf_proto __rcu **pprev;
1403 struct tcf_proto __rcu *next;
1404};
1405
/* Dereference (under RTNL) the proto currently linked at the cursor
 * position, i.e. the one an insert would go before.
 */
static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}
1410
/* Link @tp into the chain at the position described by @chain_info.
 * If the insertion point is the chain head, the chain0 head-change
 * callbacks are notified first.  tp->next is set before @tp is
 * published via rcu_assign_pointer(), and a chain reference is taken
 * on behalf of the new proto.
 */
static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}
1421
/* Unlink @tp from the chain at the position described by @chain_info.
 * If @tp was the chain head, the chain0 head-change callbacks are
 * notified with the new head first.  Drops the chain reference that
 * tcf_chain_tp_insert() took.
 */
static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}
1433
/* Scan the chain (sorted by ascending prio) for a proto with @prio and
 * fill @chain_info with the insertion cursor at that position.  Returns
 * the matching proto, NULL when the priority slot is free, or
 * ERR_PTR(-EINVAL) when the slot exists but the caller asked for an
 * auto-allocated priority or requested a conflicting non-zero protocol.
 */
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
1460
WANG Cong71203712017-08-07 15:26:50 -07001461static int tcf_fill_node(struct net *net, struct sk_buff *skb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001462 struct tcf_proto *tp, struct tcf_block *block,
1463 struct Qdisc *q, u32 parent, void *fh,
1464 u32 portid, u32 seq, u16 flags, int event)
WANG Cong71203712017-08-07 15:26:50 -07001465{
1466 struct tcmsg *tcm;
1467 struct nlmsghdr *nlh;
1468 unsigned char *b = skb_tail_pointer(skb);
1469
1470 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1471 if (!nlh)
1472 goto out_nlmsg_trim;
1473 tcm = nlmsg_data(nlh);
1474 tcm->tcm_family = AF_UNSPEC;
1475 tcm->tcm__pad1 = 0;
1476 tcm->tcm__pad2 = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001477 if (q) {
1478 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1479 tcm->tcm_parent = parent;
1480 } else {
1481 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1482 tcm->tcm_block_index = block->index;
1483 }
WANG Cong71203712017-08-07 15:26:50 -07001484 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1485 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1486 goto nla_put_failure;
1487 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1488 goto nla_put_failure;
1489 if (!fh) {
1490 tcm->tcm_handle = 0;
1491 } else {
1492 if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
1493 goto nla_put_failure;
1494 }
1495 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1496 return skb->len;
1497
1498out_nlmsg_trim:
1499nla_put_failure:
1500 nlmsg_trim(skb, b);
1501 return -1;
1502}
1503
/* Build a filter event message via tcf_fill_node() and deliver it:
 * unicast back to the requesting socket when @unicast is set, otherwise
 * multicast to the RTNLGRP_TC group (echoing to the sender if the
 * request asked for NLM_F_ECHO).  Returns 0/positive on success or a
 * negative errno.
 */
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
1528
/* Delete filter @fh from @tp and notify about it.  The RTM_DELTFILTER
 * message is built *before* calling ->delete, because dumping needs the
 * filter to still exist; the skb is freed without sending if either the
 * fill or the delete fails.  *last is set by the classifier when the
 * deleted filter was its last one.  Returns 0/positive on success or a
 * negative errno.
 */
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
	return err;
}
1565
/* Send (multicast) an @event notification for every proto on @chain.
 * The fh argument is NULL, so each message describes the whole proto
 * rather than an individual filter.
 */
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false);
}
1578
Vlad Buslovc431f892018-05-31 09:52:53 +03001579static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
David Ahernc21ef3e2017-04-16 09:48:24 -07001580 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001582 struct net *net = sock_net(skb->sk);
Patrick McHardyadd93b62008-01-22 22:11:33 -08001583 struct nlattr *tca[TCA_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 struct tcmsg *t;
1585 u32 protocol;
1586 u32 prio;
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001587 bool prio_allocate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001589 u32 chain_index;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001590 struct Qdisc *q = NULL;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001591 struct tcf_chain_info chain_info;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001592 struct tcf_chain *chain = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001593 struct tcf_block *block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 struct tcf_proto *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 unsigned long cl;
WANG Cong8113c092017-08-04 21:31:43 -07001596 void *fh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 int err;
Daniel Borkmann628185c2016-12-21 18:04:11 +01001598 int tp_created;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599
Vlad Buslovc431f892018-05-31 09:52:53 +03001600 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
Eric W. Biedermandfc47ef2012-11-16 03:03:00 +00001601 return -EPERM;
Hong zhi guode179c82013-03-25 17:36:33 +00001602
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603replay:
Daniel Borkmann628185c2016-12-21 18:04:11 +01001604 tp_created = 0;
1605
Davide Carattie3314732018-10-10 22:00:58 +02001606 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Hong zhi guode179c82013-03-25 17:36:33 +00001607 if (err < 0)
1608 return err;
1609
David S. Miller942b8162012-06-26 21:48:50 -07001610 t = nlmsg_data(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 protocol = TC_H_MIN(t->tcm_info);
1612 prio = TC_H_MAJ(t->tcm_info);
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001613 prio_allocate = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 parent = t->tcm_parent;
1615 cl = 0;
1616
1617 if (prio == 0) {
Vlad Buslovc431f892018-05-31 09:52:53 +03001618 /* If no priority is provided by the user,
1619 * we allocate one.
1620 */
1621 if (n->nlmsg_flags & NLM_F_CREATE) {
1622 prio = TC_H_MAKE(0x80000000U, 0U);
1623 prio_allocate = true;
1624 } else {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001625 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 return -ENOENT;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001627 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 }
1629
1630 /* Find head of filter chain. */
1631
Vlad Buslovc431f892018-05-31 09:52:53 +03001632 block = tcf_block_find(net, &q, &parent, &cl,
1633 t->tcm_ifindex, t->tcm_block_index, extack);
1634 if (IS_ERR(block)) {
1635 err = PTR_ERR(block);
1636 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001637 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02001638
1639 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1640 if (chain_index > TC_ACT_EXT_VAL_MASK) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001641 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Jiri Pirko5bc17012017-05-17 11:08:01 +02001642 err = -EINVAL;
1643 goto errout;
1644 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001645 chain = tcf_chain_get(block, chain_index, true);
Jiri Pirko5bc17012017-05-17 11:08:01 +02001646 if (!chain) {
Jiri Pirkod5ed72a2018-08-27 20:58:43 +02001647 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
Vlad Buslovc431f892018-05-31 09:52:53 +03001648 err = -ENOMEM;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001649 goto errout;
1650 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001652 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1653 prio, prio_allocate);
1654 if (IS_ERR(tp)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001655 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001656 err = PTR_ERR(tp);
1657 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 }
1659
1660 if (tp == NULL) {
1661 /* Proto-tcf does not exist, create new one */
1662
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001663 if (tca[TCA_KIND] == NULL || !protocol) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001664 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001665 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001667 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
Vlad Buslovc431f892018-05-31 09:52:53 +03001669 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001670 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001671 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001673 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001675 if (prio_allocate)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001676 prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Jiri Pirko33a48922017-02-09 14:38:57 +01001678 tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001679 protocol, prio, chain, extack);
Jiri Pirko33a48922017-02-09 14:38:57 +01001680 if (IS_ERR(tp)) {
1681 err = PTR_ERR(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 goto errout;
1683 }
Minoru Usui12186be2009-06-02 02:17:34 -07001684 tp_created = 1;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001685 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001686 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001687 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001689 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691 fh = tp->ops->get(tp, t->tcm_handle);
1692
WANG Cong8113c092017-08-04 21:31:43 -07001693 if (!fh) {
Vlad Buslovc431f892018-05-31 09:52:53 +03001694 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001695 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001696 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001698 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001699 } else if (n->nlmsg_flags & NLM_F_EXCL) {
1700 NL_SET_ERR_MSG(extack, "Filter already exists");
1701 err = -EEXIST;
1702 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 }
1704
Jiri Pirko9f407f12018-07-23 09:23:07 +02001705 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
1706 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
1707 err = -EINVAL;
1708 goto errout;
1709 }
1710
Cong Wang2f7ef2f2014-04-25 13:54:06 -07001711 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
Alexander Aring7306db32018-01-18 11:20:51 -05001712 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
1713 extack);
Minoru Usui12186be2009-06-02 02:17:34 -07001714 if (err == 0) {
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001715 if (tp_created)
1716 tcf_chain_tp_insert(chain, &chain_info, tp);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001717 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
Jiri Pirkoa10fa202017-10-13 14:01:05 +02001718 RTM_NEWTFILTER, false);
Minoru Usui12186be2009-06-02 02:17:34 -07001719 } else {
1720 if (tp_created)
Jakub Kicinski715df5e2018-01-24 12:54:13 -08001721 tcf_proto_destroy(tp, NULL);
Minoru Usui12186be2009-06-02 02:17:34 -07001722 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724errout:
Jiri Pirko5bc17012017-05-17 11:08:01 +02001725 if (chain)
1726 tcf_chain_put(chain);
Vlad Buslove368fdb2018-09-24 19:22:53 +03001727 tcf_block_release(q, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 if (err == -EAGAIN)
1729 /* Replay the request. */
1730 goto replay;
1731 return err;
1732}
1733
Vlad Buslovc431f892018-05-31 09:52:53 +03001734static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1735 struct netlink_ext_ack *extack)
1736{
1737 struct net *net = sock_net(skb->sk);
1738 struct nlattr *tca[TCA_MAX + 1];
1739 struct tcmsg *t;
1740 u32 protocol;
1741 u32 prio;
1742 u32 parent;
1743 u32 chain_index;
1744 struct Qdisc *q = NULL;
1745 struct tcf_chain_info chain_info;
1746 struct tcf_chain *chain = NULL;
1747 struct tcf_block *block;
1748 struct tcf_proto *tp = NULL;
1749 unsigned long cl = 0;
1750 void *fh = NULL;
1751 int err;
1752
1753 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1754 return -EPERM;
1755
Davide Carattie3314732018-10-10 22:00:58 +02001756 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03001757 if (err < 0)
1758 return err;
1759
1760 t = nlmsg_data(n);
1761 protocol = TC_H_MIN(t->tcm_info);
1762 prio = TC_H_MAJ(t->tcm_info);
1763 parent = t->tcm_parent;
1764
1765 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
1766 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
1767 return -ENOENT;
1768 }
1769
1770 /* Find head of filter chain. */
1771
1772 block = tcf_block_find(net, &q, &parent, &cl,
1773 t->tcm_ifindex, t->tcm_block_index, extack);
1774 if (IS_ERR(block)) {
1775 err = PTR_ERR(block);
1776 goto errout;
1777 }
1778
1779 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1780 if (chain_index > TC_ACT_EXT_VAL_MASK) {
1781 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
1782 err = -EINVAL;
1783 goto errout;
1784 }
1785 chain = tcf_chain_get(block, chain_index, false);
1786 if (!chain) {
Jiri Pirko5ca8a252018-08-03 11:08:47 +02001787 /* User requested flush on non-existent chain. Nothing to do,
1788 * so just return success.
1789 */
1790 if (prio == 0) {
1791 err = 0;
1792 goto errout;
1793 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001794 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Jiri Pirkob7b42472018-08-27 20:58:44 +02001795 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03001796 goto errout;
1797 }
1798
1799 if (prio == 0) {
1800 tfilter_notify_chain(net, skb, block, q, parent, n,
1801 chain, RTM_DELTFILTER);
1802 tcf_chain_flush(chain);
1803 err = 0;
1804 goto errout;
1805 }
1806
1807 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1808 prio, false);
1809 if (!tp || IS_ERR(tp)) {
1810 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03001811 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03001812 goto errout;
1813 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1814 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1815 err = -EINVAL;
1816 goto errout;
1817 }
1818
1819 fh = tp->ops->get(tp, t->tcm_handle);
1820
1821 if (!fh) {
1822 if (t->tcm_handle == 0) {
1823 tcf_chain_tp_remove(chain, &chain_info, tp);
1824 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
1825 RTM_DELTFILTER, false);
1826 tcf_proto_destroy(tp, extack);
1827 err = 0;
1828 } else {
1829 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
1830 err = -ENOENT;
1831 }
1832 } else {
1833 bool last;
1834
1835 err = tfilter_del_notify(net, skb, n, tp, block,
1836 q, parent, fh, false, &last,
1837 extack);
1838 if (err)
1839 goto errout;
1840 if (last) {
1841 tcf_chain_tp_remove(chain, &chain_info, tp);
1842 tcf_proto_destroy(tp, extack);
1843 }
1844 }
1845
1846errout:
1847 if (chain)
1848 tcf_chain_put(chain);
Vlad Buslove368fdb2018-09-24 19:22:53 +03001849 tcf_block_release(q, block);
Vlad Buslovc431f892018-05-31 09:52:53 +03001850 return err;
1851}
1852
/* Handle RTM_GETTFILTER: look up one filter by (block/qdisc, chain,
 * protocol, prio, handle) and unicast an RTM_NEWTFILTER reply to the
 * requester.  Read-only: no capability check is performed (unlike
 * tc_new_tfilter/tc_del_tfilter above).
 *
 * Returns 0 on success or a negative errno.
 */
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	/* A get must address a specific proto; prio 0 is only meaningful
	 * for create (auto-allocate) or delete (flush).
	 */
	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		/* "true" => unicast the reply to the requesting socket only. */
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	tcf_block_release(q, block);
	return err;
}
1938
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08001939struct tcf_dump_args {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 struct tcf_walker w;
1941 struct sk_buff *skb;
1942 struct netlink_callback *cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001943 struct tcf_block *block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02001944 struct Qdisc *q;
1945 u32 parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946};
1947
WANG Cong8113c092017-08-04 21:31:43 -07001948static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949{
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08001950 struct tcf_dump_args *a = (void *)arg;
WANG Cong832d1d52014-01-09 16:14:01 -08001951 struct net *net = sock_net(a->skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001953 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
Jiri Pirkoa10fa202017-10-13 14:01:05 +02001954 n, NETLINK_CB(a->cb->skb).portid,
Jamal Hadi Salim5a7a5552016-09-18 08:45:33 -04001955 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1956 RTM_NEWTFILTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957}
1958
Jiri Pirkoa10fa202017-10-13 14:01:05 +02001959static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
1960 struct sk_buff *skb, struct netlink_callback *cb,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02001961 long index_start, long *p_index)
1962{
1963 struct net *net = sock_net(skb->sk);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001964 struct tcf_block *block = chain->block;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02001965 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1966 struct tcf_dump_args arg;
1967 struct tcf_proto *tp;
1968
1969 for (tp = rtnl_dereference(chain->filter_chain);
1970 tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
1971 if (*p_index < index_start)
1972 continue;
1973 if (TC_H_MAJ(tcm->tcm_info) &&
1974 TC_H_MAJ(tcm->tcm_info) != tp->prio)
1975 continue;
1976 if (TC_H_MIN(tcm->tcm_info) &&
1977 TC_H_MIN(tcm->tcm_info) != tp->protocol)
1978 continue;
1979 if (*p_index > index_start)
1980 memset(&cb->args[1], 0,
1981 sizeof(cb->args) - sizeof(cb->args[0]));
1982 if (cb->args[1] == 0) {
YueHaibing53189182018-07-17 20:58:14 +08001983 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02001984 NETLINK_CB(cb->skb).portid,
1985 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1986 RTM_NEWTFILTER) <= 0)
Jiri Pirko5bc17012017-05-17 11:08:01 +02001987 return false;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02001988
1989 cb->args[1] = 1;
1990 }
1991 if (!tp->ops->walk)
1992 continue;
1993 arg.w.fn = tcf_node_dump;
1994 arg.skb = skb;
1995 arg.cb = cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001996 arg.block = block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02001997 arg.q = q;
1998 arg.parent = parent;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02001999 arg.w.stop = 0;
2000 arg.w.skip = cb->args[1] - 1;
2001 arg.w.count = 0;
Vlad Buslov01683a12018-07-09 13:29:11 +03002002 arg.w.cookie = cb->args[2];
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002003 tp->ops->walk(tp, &arg.w);
Vlad Buslov01683a12018-07-09 13:29:11 +03002004 cb->args[2] = arg.w.cookie;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002005 cb->args[1] = arg.w.count + 1;
2006 if (arg.w.stop)
Jiri Pirko5bc17012017-05-17 11:08:01 +02002007 return false;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002008 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002009 return true;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002010}
2011
Eric Dumazetbd27a872009-11-05 20:57:26 -08002012/* called with RTNL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2014{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002015 struct net *net = sock_net(skb->sk);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002016 struct nlattr *tca[TCA_MAX + 1];
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002017 struct Qdisc *q = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02002018 struct tcf_block *block;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02002019 struct tcf_chain *chain;
David S. Miller942b8162012-06-26 21:48:50 -07002020 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002021 long index_start;
2022 long index;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002023 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002024 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
Hong zhi guo573ce262013-03-27 06:47:04 +00002026 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 return skb->len;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002028
David Aherndac9c972018-10-07 20:16:24 -07002029 err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
2030 cb->extack);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002031 if (err)
2032 return err;
2033
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002034 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002035 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002036 if (!block)
WANG Cong143976c2017-08-24 16:51:29 -07002037 goto out;
Jiri Pirkod680b352018-01-18 16:14:49 +01002038 /* If we work with block index, q is NULL and parent value
2039 * will never be used in the following code. The check
2040 * in tcf_fill_node prevents it. However, compiler does not
2041 * see that far, so set parent to zero to silence the warning
2042 * about parent being uninitialized.
2043 */
2044 parent = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002045 } else {
2046 const struct Qdisc_class_ops *cops;
2047 struct net_device *dev;
2048 unsigned long cl = 0;
2049
2050 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2051 if (!dev)
2052 return skb->len;
2053
2054 parent = tcm->tcm_parent;
2055 if (!parent) {
2056 q = dev->qdisc;
2057 parent = q->handle;
2058 } else {
2059 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2060 }
2061 if (!q)
2062 goto out;
2063 cops = q->ops->cl_ops;
2064 if (!cops)
2065 goto out;
2066 if (!cops->tcf_block)
2067 goto out;
2068 if (TC_H_MIN(tcm->tcm_parent)) {
2069 cl = cops->find(q, tcm->tcm_parent);
2070 if (cl == 0)
2071 goto out;
2072 }
2073 block = cops->tcf_block(q, cl, NULL);
2074 if (!block)
2075 goto out;
2076 if (tcf_block_shared(block))
2077 q = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002080 index_start = cb->args[0];
2081 index = 0;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002082
2083 list_for_each_entry(chain, &block->chain_list, list) {
2084 if (tca[TCA_CHAIN] &&
2085 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2086 continue;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002087 if (!tcf_chain_dump(chain, q, parent, skb, cb,
Roman Kapl5ae437a2018-02-19 21:32:51 +01002088 index_start, &index)) {
2089 err = -EMSGSIZE;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002090 break;
Roman Kapl5ae437a2018-02-19 21:32:51 +01002091 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002092 }
2093
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002094 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2095 tcf_block_refcnt_put(block);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002096 cb->args[0] = index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098out:
Roman Kapl5ae437a2018-02-19 21:32:51 +01002099 /* If we did no progress, the error (EMSGSIZE) is real */
2100 if (skb->len == 0 && err)
2101 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 return skb->len;
2103}
2104
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002105static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
2106 struct sk_buff *skb, struct tcf_block *block,
2107 u32 portid, u32 seq, u16 flags, int event)
2108{
2109 unsigned char *b = skb_tail_pointer(skb);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002110 const struct tcf_proto_ops *ops;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002111 struct nlmsghdr *nlh;
2112 struct tcmsg *tcm;
Jiri Pirko9f407f12018-07-23 09:23:07 +02002113 void *priv;
2114
2115 ops = chain->tmplt_ops;
2116 priv = chain->tmplt_priv;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002117
2118 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2119 if (!nlh)
2120 goto out_nlmsg_trim;
2121 tcm = nlmsg_data(nlh);
2122 tcm->tcm_family = AF_UNSPEC;
2123 tcm->tcm__pad1 = 0;
2124 tcm->tcm__pad2 = 0;
2125 tcm->tcm_handle = 0;
2126 if (block->q) {
2127 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2128 tcm->tcm_parent = block->q->handle;
2129 } else {
2130 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2131 tcm->tcm_block_index = block->index;
2132 }
2133
2134 if (nla_put_u32(skb, TCA_CHAIN, chain->index))
2135 goto nla_put_failure;
2136
Jiri Pirko9f407f12018-07-23 09:23:07 +02002137 if (ops) {
2138 if (nla_put_string(skb, TCA_KIND, ops->kind))
2139 goto nla_put_failure;
2140 if (ops->tmplt_dump(skb, net, priv) < 0)
2141 goto nla_put_failure;
2142 }
2143
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002144 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2145 return skb->len;
2146
2147out_nlmsg_trim:
2148nla_put_failure:
2149 nlmsg_trim(skb, b);
2150 return -EMSGSIZE;
2151}
2152
2153static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2154 u32 seq, u16 flags, int event, bool unicast)
2155{
2156 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2157 struct tcf_block *block = chain->block;
2158 struct net *net = block->net;
2159 struct sk_buff *skb;
2160
2161 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2162 if (!skb)
2163 return -ENOBUFS;
2164
2165 if (tc_chain_fill_node(chain, net, skb, block, portid,
2166 seq, flags, event) <= 0) {
2167 kfree_skb(skb);
2168 return -EINVAL;
2169 }
2170
2171 if (unicast)
2172 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2173
2174 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2175}
2176
Jiri Pirko9f407f12018-07-23 09:23:07 +02002177static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2178 struct nlattr **tca,
2179 struct netlink_ext_ack *extack)
2180{
2181 const struct tcf_proto_ops *ops;
2182 void *tmplt_priv;
2183
2184 /* If kind is not set, user did not specify template. */
2185 if (!tca[TCA_KIND])
2186 return 0;
2187
2188 ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
2189 if (IS_ERR(ops))
2190 return PTR_ERR(ops);
2191 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2192 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2193 return -EOPNOTSUPP;
2194 }
2195
2196 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2197 if (IS_ERR(tmplt_priv)) {
2198 module_put(ops->owner);
2199 return PTR_ERR(tmplt_priv);
2200 }
2201 chain->tmplt_ops = ops;
2202 chain->tmplt_priv = tmplt_priv;
2203 return 0;
2204}
2205
2206static void tc_chain_tmplt_del(struct tcf_chain *chain)
2207{
2208 const struct tcf_proto_ops *ops = chain->tmplt_ops;
2209
2210 /* If template ops are set, no work to do for us. */
2211 if (!ops)
2212 return;
2213
2214 ops->tmplt_destroy(chain->tmplt_priv);
2215 module_put(ops->owner);
2216}
2217
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002218/* Add/delete/get a chain */
2219
2220static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2221 struct netlink_ext_ack *extack)
2222{
2223 struct net *net = sock_net(skb->sk);
2224 struct nlattr *tca[TCA_MAX + 1];
2225 struct tcmsg *t;
2226 u32 parent;
2227 u32 chain_index;
2228 struct Qdisc *q = NULL;
2229 struct tcf_chain *chain = NULL;
2230 struct tcf_block *block;
2231 unsigned long cl;
2232 int err;
2233
2234 if (n->nlmsg_type != RTM_GETCHAIN &&
2235 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2236 return -EPERM;
2237
2238replay:
Davide Carattie3314732018-10-10 22:00:58 +02002239 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002240 if (err < 0)
2241 return err;
2242
2243 t = nlmsg_data(n);
2244 parent = t->tcm_parent;
2245 cl = 0;
2246
2247 block = tcf_block_find(net, &q, &parent, &cl,
2248 t->tcm_ifindex, t->tcm_block_index, extack);
2249 if (IS_ERR(block))
2250 return PTR_ERR(block);
2251
2252 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2253 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2254 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002255 err = -EINVAL;
2256 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002257 }
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002258
2259 mutex_lock(&block->lock);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002260 chain = tcf_chain_lookup(block, chain_index);
2261 if (n->nlmsg_type == RTM_NEWCHAIN) {
2262 if (chain) {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002263 if (tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002264 /* The chain exists only because there is
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002265 * some action referencing it.
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002266 */
2267 tcf_chain_hold(chain);
2268 } else {
2269 NL_SET_ERR_MSG(extack, "Filter chain already exists");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002270 err = -EEXIST;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002271 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002272 }
2273 } else {
2274 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2275 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002276 err = -ENOENT;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002277 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002278 }
2279 chain = tcf_chain_create(block, chain_index);
2280 if (!chain) {
2281 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002282 err = -ENOMEM;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002283 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002284 }
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002285 }
2286 } else {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002287 if (!chain || tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002288 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002289 err = -EINVAL;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002290 goto errout_block_locked;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002291 }
2292 tcf_chain_hold(chain);
2293 }
2294
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002295 if (n->nlmsg_type == RTM_NEWCHAIN) {
2296 /* Modifying chain requires holding parent block lock. In case
2297 * the chain was successfully added, take a reference to the
2298 * chain. This ensures that an empty chain does not disappear at
2299 * the end of this function.
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002300 */
2301 tcf_chain_hold(chain);
2302 chain->explicitly_created = true;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002303 }
2304 mutex_unlock(&block->lock);
2305
2306 switch (n->nlmsg_type) {
2307 case RTM_NEWCHAIN:
2308 err = tc_chain_tmplt_add(chain, net, tca, extack);
2309 if (err) {
2310 tcf_chain_put_explicitly_created(chain);
2311 goto errout;
2312 }
2313
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002314 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2315 RTM_NEWCHAIN, false);
2316 break;
2317 case RTM_DELCHAIN:
Cong Wangf5b9bac2018-09-11 14:22:23 -07002318 tfilter_notify_chain(net, skb, block, q, parent, n,
2319 chain, RTM_DELTFILTER);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002320 /* Flush the chain first as the user requested chain removal. */
2321 tcf_chain_flush(chain);
2322 /* In case the chain was successfully deleted, put a reference
2323 * to the chain previously taken during addition.
2324 */
2325 tcf_chain_put_explicitly_created(chain);
2326 break;
2327 case RTM_GETCHAIN:
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002328 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2329 n->nlmsg_seq, n->nlmsg_type, true);
2330 if (err < 0)
2331 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2332 break;
2333 default:
2334 err = -EOPNOTSUPP;
2335 NL_SET_ERR_MSG(extack, "Unsupported message type");
2336 goto errout;
2337 }
2338
2339errout:
2340 tcf_chain_put(chain);
Vlad Buslove368fdb2018-09-24 19:22:53 +03002341errout_block:
2342 tcf_block_release(q, block);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002343 if (err == -EAGAIN)
2344 /* Replay the request. */
2345 goto replay;
2346 return err;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002347
2348errout_block_locked:
2349 mutex_unlock(&block->lock);
2350 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002351}
2352
/* Handle RTM_GETCHAIN dump: emit one RTM_NEWCHAIN record per chain of the
 * addressed block.  Called with RTNL held.
 *
 * Block resolution mirrors tc_dump_tfilter(): either directly by block
 * index (reference-counted) or through device/qdisc/class.  Chains held
 * only by actions are skipped.  cb->args[0] is the resume index across
 * netlink dump callbacks.
 */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		/* Shared-block case: takes a reference, dropped below. */
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			/* No parent given: use the device root qdisc. */
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		/* For shared blocks, per-qdisc info must not be reported. */
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		/* Optional TCA_CHAIN attribute restricts the dump to one chain. */
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		/* Chains referenced only by actions are not user-visible here. */
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
2453
WANG Cong18d02642014-09-25 10:26:37 -07002454void tcf_exts_destroy(struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455{
2456#ifdef CONFIG_NET_CLS_ACT
Vlad Buslov90b73b72018-07-05 17:24:33 +03002457 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
WANG Cong22dc13c2016-08-13 22:35:00 -07002458 kfree(exts->actions);
2459 exts->nr_actions = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460#endif
2461}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002462EXPORT_SYMBOL(tcf_exts_destroy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463
Benjamin LaHaisec1b52732013-01-14 05:15:39 +00002464int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
Alexander Aring50a56192018-01-18 11:20:52 -05002465 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
2466 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468#ifdef CONFIG_NET_CLS_ACT
2469 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 struct tc_action *act;
Roman Mashakd04e6992018-03-08 16:59:17 -05002471 size_t attr_size = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
WANG Cong5da57f42013-12-15 20:15:07 -08002473 if (exts->police && tb[exts->police]) {
Jiri Pirko9fb9f252017-05-17 11:08:02 +02002474 act = tcf_action_init_1(net, tp, tb[exts->police],
2475 rate_tlv, "police", ovr,
Vlad Buslov789871b2018-07-05 17:24:25 +03002476 TCA_ACT_BIND, true, extack);
Patrick McHardyab27cfb2008-01-23 20:33:13 -08002477 if (IS_ERR(act))
2478 return PTR_ERR(act);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479
WANG Cong33be6272013-12-15 20:15:05 -08002480 act->type = exts->type = TCA_OLD_COMPAT;
WANG Cong22dc13c2016-08-13 22:35:00 -07002481 exts->actions[0] = act;
2482 exts->nr_actions = 1;
WANG Cong5da57f42013-12-15 20:15:07 -08002483 } else if (exts->action && tb[exts->action]) {
Vlad Buslov90b73b72018-07-05 17:24:33 +03002484 int err;
WANG Cong22dc13c2016-08-13 22:35:00 -07002485
Jiri Pirko9fb9f252017-05-17 11:08:02 +02002486 err = tcf_action_init(net, tp, tb[exts->action],
2487 rate_tlv, NULL, ovr, TCA_ACT_BIND,
Vlad Buslov90b73b72018-07-05 17:24:33 +03002488 exts->actions, &attr_size, true,
Vlad Buslov789871b2018-07-05 17:24:25 +03002489 extack);
Vlad Buslov90b73b72018-07-05 17:24:33 +03002490 if (err < 0)
WANG Cong33be6272013-12-15 20:15:05 -08002491 return err;
Vlad Buslov90b73b72018-07-05 17:24:33 +03002492 exts->nr_actions = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 }
Cong Wange4b95c42017-11-06 13:47:19 -08002494 exts->net = net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496#else
WANG Cong5da57f42013-12-15 20:15:07 -08002497 if ((exts->action && tb[exts->action]) ||
Alexander Aring50a56192018-01-18 11:20:52 -05002498 (exts->police && tb[exts->police])) {
2499 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 return -EOPNOTSUPP;
Alexander Aring50a56192018-01-18 11:20:52 -05002501 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502#endif
2503
2504 return 0;
2505}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002506EXPORT_SYMBOL(tcf_exts_validate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
Jiri Pirko9b0d4442017-08-04 14:29:15 +02002508void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509{
2510#ifdef CONFIG_NET_CLS_ACT
WANG Cong22dc13c2016-08-13 22:35:00 -07002511 struct tcf_exts old = *dst;
2512
Jiri Pirko9b0d4442017-08-04 14:29:15 +02002513 *dst = *src;
WANG Cong22dc13c2016-08-13 22:35:00 -07002514 tcf_exts_destroy(&old);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515#endif
2516}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002517EXPORT_SYMBOL(tcf_exts_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
WANG Cong22dc13c2016-08-13 22:35:00 -07002519#ifdef CONFIG_NET_CLS_ACT
2520static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
2521{
2522 if (exts->nr_actions == 0)
2523 return NULL;
2524 else
2525 return exts->actions[0];
2526}
2527#endif
WANG Cong33be6272013-12-15 20:15:05 -08002528
WANG Cong5da57f42013-12-15 20:15:07 -08002529int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530{
2531#ifdef CONFIG_NET_CLS_ACT
Cong Wang9cc63db2014-07-16 14:25:30 -07002532 struct nlattr *nest;
2533
Jiri Pirko978dfd82017-08-04 14:29:03 +02002534 if (exts->action && tcf_exts_has_actions(exts)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 /*
2536 * again for backward compatible mode - we want
2537 * to work with both old and new modes of entering
2538 * tc data even if iproute2 was newer - jhs
2539 */
WANG Cong33be6272013-12-15 20:15:05 -08002540 if (exts->type != TCA_OLD_COMPAT) {
WANG Cong5da57f42013-12-15 20:15:07 -08002541 nest = nla_nest_start(skb, exts->action);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002542 if (nest == NULL)
2543 goto nla_put_failure;
WANG Cong22dc13c2016-08-13 22:35:00 -07002544
Vlad Buslov90b73b72018-07-05 17:24:33 +03002545 if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -08002546 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002547 nla_nest_end(skb, nest);
WANG Cong5da57f42013-12-15 20:15:07 -08002548 } else if (exts->police) {
WANG Cong33be6272013-12-15 20:15:05 -08002549 struct tc_action *act = tcf_exts_first_act(exts);
WANG Cong5da57f42013-12-15 20:15:07 -08002550 nest = nla_nest_start(skb, exts->police);
Jamal Hadi Salim63acd682013-12-23 08:02:12 -05002551 if (nest == NULL || !act)
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002552 goto nla_put_failure;
WANG Cong33be6272013-12-15 20:15:05 -08002553 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -08002554 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002555 nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 }
2557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 return 0;
Cong Wang9cc63db2014-07-16 14:25:30 -07002559
2560nla_put_failure:
2561 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 return -1;
Cong Wang9cc63db2014-07-16 14:25:30 -07002563#else
2564 return 0;
2565#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002567EXPORT_SYMBOL(tcf_exts_dump);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002569
WANG Cong5da57f42013-12-15 20:15:07 -08002570int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571{
2572#ifdef CONFIG_NET_CLS_ACT
WANG Cong33be6272013-12-15 20:15:05 -08002573 struct tc_action *a = tcf_exts_first_act(exts);
Ignacy Gawędzkib057df22015-02-03 19:05:18 +01002574 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
WANG Cong33be6272013-12-15 20:15:05 -08002575 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576#endif
2577 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002579EXPORT_SYMBOL(tcf_exts_dump_stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580
Cong Wangaeb3fec2018-12-11 11:15:46 -08002581int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
2582 void *type_data, bool err_stop)
Jiri Pirko717503b2017-10-11 09:41:09 +02002583{
Cong Wangaeb3fec2018-12-11 11:15:46 -08002584 struct tcf_block_cb *block_cb;
2585 int ok_count = 0;
2586 int err;
2587
2588 /* Make sure all netdevs sharing this block are offload-capable. */
2589 if (block->nooffloaddevcnt && err_stop)
2590 return -EOPNOTSUPP;
2591
2592 list_for_each_entry(block_cb, &block->cb_list, list) {
2593 err = block_cb->cb(type, type_data, block_cb->cb_priv);
2594 if (err) {
2595 if (err_stop)
2596 return err;
2597 } else {
2598 ok_count++;
2599 }
2600 }
2601 return ok_count;
Jiri Pirko717503b2017-10-11 09:41:09 +02002602}
2603EXPORT_SYMBOL(tc_setup_cb_call);
Jiri Pirkob3f55bd2017-10-11 09:41:08 +02002604
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01002605int tc_setup_flow_action(struct flow_action *flow_action,
2606 const struct tcf_exts *exts)
2607{
2608 const struct tc_action *act;
2609 int i, j, k;
2610
2611 if (!exts)
2612 return 0;
2613
2614 j = 0;
2615 tcf_exts_for_each_action(i, act, exts) {
2616 struct flow_action_entry *entry;
2617
2618 entry = &flow_action->entries[j];
2619 if (is_tcf_gact_ok(act)) {
2620 entry->id = FLOW_ACTION_ACCEPT;
2621 } else if (is_tcf_gact_shot(act)) {
2622 entry->id = FLOW_ACTION_DROP;
2623 } else if (is_tcf_gact_trap(act)) {
2624 entry->id = FLOW_ACTION_TRAP;
2625 } else if (is_tcf_gact_goto_chain(act)) {
2626 entry->id = FLOW_ACTION_GOTO;
2627 entry->chain_index = tcf_gact_goto_chain_index(act);
2628 } else if (is_tcf_mirred_egress_redirect(act)) {
2629 entry->id = FLOW_ACTION_REDIRECT;
2630 entry->dev = tcf_mirred_dev(act);
2631 } else if (is_tcf_mirred_egress_mirror(act)) {
2632 entry->id = FLOW_ACTION_MIRRED;
2633 entry->dev = tcf_mirred_dev(act);
2634 } else if (is_tcf_vlan(act)) {
2635 switch (tcf_vlan_action(act)) {
2636 case TCA_VLAN_ACT_PUSH:
2637 entry->id = FLOW_ACTION_VLAN_PUSH;
2638 entry->vlan.vid = tcf_vlan_push_vid(act);
2639 entry->vlan.proto = tcf_vlan_push_proto(act);
2640 entry->vlan.prio = tcf_vlan_push_prio(act);
2641 break;
2642 case TCA_VLAN_ACT_POP:
2643 entry->id = FLOW_ACTION_VLAN_POP;
2644 break;
2645 case TCA_VLAN_ACT_MODIFY:
2646 entry->id = FLOW_ACTION_VLAN_MANGLE;
2647 entry->vlan.vid = tcf_vlan_push_vid(act);
2648 entry->vlan.proto = tcf_vlan_push_proto(act);
2649 entry->vlan.prio = tcf_vlan_push_prio(act);
2650 break;
2651 default:
2652 goto err_out;
2653 }
2654 } else if (is_tcf_tunnel_set(act)) {
2655 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
2656 entry->tunnel = tcf_tunnel_info(act);
2657 } else if (is_tcf_tunnel_release(act)) {
2658 entry->id = FLOW_ACTION_TUNNEL_DECAP;
2659 entry->tunnel = tcf_tunnel_info(act);
2660 } else if (is_tcf_pedit(act)) {
2661 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
2662 switch (tcf_pedit_cmd(act, k)) {
2663 case TCA_PEDIT_KEY_EX_CMD_SET:
2664 entry->id = FLOW_ACTION_MANGLE;
2665 break;
2666 case TCA_PEDIT_KEY_EX_CMD_ADD:
2667 entry->id = FLOW_ACTION_ADD;
2668 break;
2669 default:
2670 goto err_out;
2671 }
2672 entry->mangle.htype = tcf_pedit_htype(act, k);
2673 entry->mangle.mask = tcf_pedit_mask(act, k);
2674 entry->mangle.val = tcf_pedit_val(act, k);
2675 entry->mangle.offset = tcf_pedit_offset(act, k);
2676 entry = &flow_action->entries[++j];
2677 }
2678 } else if (is_tcf_csum(act)) {
2679 entry->id = FLOW_ACTION_CSUM;
2680 entry->csum_flags = tcf_csum_update_flags(act);
2681 } else if (is_tcf_skbedit_mark(act)) {
2682 entry->id = FLOW_ACTION_MARK;
2683 entry->mark = tcf_skbedit_mark(act);
2684 } else {
2685 goto err_out;
2686 }
2687
2688 if (!is_tcf_pedit(act))
2689 j++;
2690 }
2691 return 0;
2692err_out:
2693 return -EOPNOTSUPP;
2694}
2695EXPORT_SYMBOL(tc_setup_flow_action);
2696
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01002697unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
2698{
2699 unsigned int num_acts = 0;
2700 struct tc_action *act;
2701 int i;
2702
2703 tcf_exts_for_each_action(i, act, exts) {
2704 if (is_tcf_pedit(act))
2705 num_acts += tcf_pedit_nkeys(act);
2706 else
2707 num_acts++;
2708 }
2709 return num_acts;
2710}
2711EXPORT_SYMBOL(tcf_exts_num_actions);
2712
Jiri Pirko48617382018-01-17 11:46:46 +01002713static __net_init int tcf_net_init(struct net *net)
2714{
2715 struct tcf_net *tn = net_generic(net, tcf_net_id);
2716
Vlad Buslovab281622018-09-24 19:22:56 +03002717 spin_lock_init(&tn->idr_lock);
Jiri Pirko48617382018-01-17 11:46:46 +01002718 idr_init(&tn->idr);
2719 return 0;
2720}
2721
/* Per-netns teardown: release the block-index IDR. */
static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}
2728
/* Pernet registration for the per-netns tcf_net state above. */
static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};
2735
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736static int __init tc_filter_init(void)
2737{
Jiri Pirko48617382018-01-17 11:46:46 +01002738 int err;
2739
Cong Wang7aa00452017-10-26 18:24:28 -07002740 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
2741 if (!tc_filter_wq)
2742 return -ENOMEM;
2743
Jiri Pirko48617382018-01-17 11:46:46 +01002744 err = register_pernet_subsys(&tcf_net_ops);
2745 if (err)
2746 goto err_register_pernet_subsys;
2747
John Hurley7f76fa32018-11-09 21:21:26 -08002748 err = rhashtable_init(&indr_setup_block_ht,
2749 &tc_indr_setup_block_ht_params);
2750 if (err)
2751 goto err_rhash_setup_block_ht;
2752
Vlad Buslovc431f892018-05-31 09:52:53 +03002753 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
2754 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
2755 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
Florian Westphalb97bac62017-08-09 20:41:48 +02002756 tc_dump_tfilter, 0);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002757 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
2758 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
2759 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
2760 tc_dump_chain, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 return 0;
Jiri Pirko48617382018-01-17 11:46:46 +01002763
John Hurley7f76fa32018-11-09 21:21:26 -08002764err_rhash_setup_block_ht:
2765 unregister_pernet_subsys(&tcf_net_ops);
Jiri Pirko48617382018-01-17 11:46:46 +01002766err_register_pernet_subsys:
2767 destroy_workqueue(tc_filter_wq);
2768 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769}
2770
/* Register at subsystem initcall level so classifiers are available
 * before device drivers and qdiscs initialize.
 */
subsys_initcall(tc_filter_init);