/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	rtnl_unlock();
	request_module("cls_%s", kind);
	rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

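/* Schedule rcu_work on the tc filter workqueue; the work function only runs
 * after an RCU grace period has elapsed.
 */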
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

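/* Allocate a new tcf_proto, look up its classifier ops (loading the module
 * if necessary) and run the classifier's init() callback.
 */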
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

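/* tcf_proto reference counting: the last tcf_proto_put() destroys the
 * instance and drops its chain and module references.
 */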
static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, extack);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, extack);
}

static int walker_noop(struct tcf_proto *tp, void *d, struct tcf_walker *arg)
{
	return -1;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp)
{
	struct tcf_walker walker = { .fn = walker_noop, };

	if (tp->ops->walk) {
		tp->ops->walk(tp, &walker);
		return !walker.stop;
	}
	return true;
}

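/* Under tp->lock, mark an empty tp as deleting; return its deleting state. */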
static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

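/* Allocate a chain and link it into the block's chain list.
 * Caller must hold block->lock.
 */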
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

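/* Propagate a new chain 0 filter head to every chain_head_change callback
 * registered on the block.
 */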
static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree(chain);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

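/* Look up a chain by index under block->lock, optionally creating it, and
 * take a reference on it.
 */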
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool is_last, free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;
	u32 chain_index;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	is_last = refcnt - chain->action_refcnt == 0;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;
	chain_index = chain->index;

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	/* The last dropped non-action reference will trigger notification. */
	if (is_last && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain_index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

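/* Detach all filters from the chain and drop their references. */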
static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, NULL);
		tp = tp_next;
	}
}

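/* Return the tcf_block attached to the ingress queue of a device, if any. */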
static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static struct rhashtable indr_setup_block_ht;

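/* Indirect block callbacks let a driver receive block bind/unbind events for
 * devices it does not directly control; registrations are tracked per device
 * in the hashtable above.
 */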
struct tc_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
	struct tcf_block *block;
};

struct tc_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	tc_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

static const struct rhashtable_params tc_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct tc_indr_block_dev, dev),
	.head_offset	= offsetof(struct tc_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      tc_indr_setup_block_ht_params);
}

static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	indr_dev->block = tc_dev_ingress_block(dev);
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   tc_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       tc_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
			tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
				  struct tc_indr_block_cb *indr_block_cb,
				  enum tc_block_command command)
{
	struct tc_block_offload bo = {
		.command	= command,
		.binder_type	= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.block		= indr_dev->block,
	};

	if (!indr_dev->block)
		return;

	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
			  &bo);
}

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	int err;

	indr_dev = tc_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
	return 0;

err_dev_put:
	tc_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);

int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);

void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	/* Send unbind message if required to free any block cbs. */
	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
	tc_indr_block_cb_del(indr_block_cb);
	tc_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);

void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	rtnl_lock();
	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);

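/* Notify all indirect block callbacks registered for dev about a block
 * bind or unbind.
 */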
static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum tc_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	struct tc_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.block		= block,
		.extack		= extack,
	};

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	bo.extack = extack;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

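/* Offer the block to the device for offload at bind time; devices that
 * cannot accept it are counted in nooffloaddevcnt instead.
 */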
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		return -EOPNOTSUPP;
	}

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		return err;

	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}

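/* Register a chain 0 head change callback and replay the current chain 0
 * filter head to it.
 */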
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

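/* Shared blocks are registered in a per-netns IDR so that they can be looked
 * up by block index.
 */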
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

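/* Return the tp following 'tp' on the chain and take a reference on it,
 * restarting the walk by prio if 'tp' has been marked for deletion.
 */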
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain);
	}
}

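/* Drop a reference to the block; the last reference removes a shared block
 * from the IDR, unbinds offloads and flushes or frees its chains.
 */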
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block)
{
	__tcf_block_put(block, NULL, NULL);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;

		rcu_read_lock();

		/* Find link */
		dev = dev_get_by_index_rcu(net, ifindex);
		if (!dev) {
			rcu_read_unlock();
			return ERR_PTR(-ENODEV);
		}

		/* Find qdisc */
		if (!*parent) {
			*q = dev->qdisc;
			*parent = (*q)->handle;
		} else {
			*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
			if (!*q) {
				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
				err = -EINVAL;
				goto errout_rcu;
			}
		}

		*q = qdisc_refcount_inc_nz(*q);
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}

		/* Is it classful? */
		cops = (*q)->ops->cl_ops;
		if (!cops) {
			NL_SET_ERR_MSG(extack, "Qdisc not classful");
			err = -EINVAL;
			goto errout_rcu;
		}

		if (!cops->tcf_block) {
			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
			err = -EOPNOTSUPP;
			goto errout_rcu;
		}

		/* At this point we know that qdisc is not noop_qdisc,
		 * which means that qdisc holds a reference to net_device
		 * and we hold a reference to qdisc, so it is safe to release
		 * rcu read lock.
		 */
		rcu_read_unlock();

		/* Do we search for filter, attached to class? */
		if (TC_H_MIN(*parent)) {
			*cl = cops->find(*q, *parent);
			if (*cl == 0) {
				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
				err = -ENOENT;
				goto errout_qdisc;
			}
		}

		/* And the last stroke */
		block = cops->tcf_block(*q, *cl, extack);
		if (!block) {
			err = -EINVAL;
			goto errout_qdisc;
		}
		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			err = -EOPNOTSUPP;
			goto errout_qdisc;
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;

errout_rcu:
	rcu_read_unlock();
errout_qdisc:
	if (*q) {
		qdisc_put(*q);
		*q = NULL;
	}
	return ERR_PTR(err);
}

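/* Release the block and qdisc references taken by tcf_block_find(). */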
1247static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
1248{
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001249 if (!IS_ERR_OR_NULL(block))
1250 tcf_block_refcnt_put(block);
1251
Vlad Buslove368fdb2018-09-24 19:22:53 +03001252 if (q)
1253 qdisc_put(q);
Vlad Buslovc431f892018-05-31 09:52:53 +03001254}
1255
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001256struct tcf_block_owner_item {
1257 struct list_head list;
1258 struct Qdisc *q;
1259 enum tcf_block_binder_type binder_type;
1260};
1261
1262static void
1263tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1264 struct Qdisc *q,
1265 enum tcf_block_binder_type binder_type)
1266{
1267 if (block->keep_dst &&
1268 binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1269 binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1270 netif_keep_dst(qdisc_dev(q));
1271}
1272
1273void tcf_block_netif_keep_dst(struct tcf_block *block)
1274{
1275 struct tcf_block_owner_item *item;
1276
1277 block->keep_dst = true;
1278 list_for_each_entry(item, &block->owner_list, list)
1279 tcf_block_owner_netif_keep_dst(block, item->q,
1280 item->binder_type);
1281}
1282EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1283
1284static int tcf_block_owner_add(struct tcf_block *block,
1285 struct Qdisc *q,
1286 enum tcf_block_binder_type binder_type)
1287{
1288 struct tcf_block_owner_item *item;
1289
1290 item = kmalloc(sizeof(*item), GFP_KERNEL);
1291 if (!item)
1292 return -ENOMEM;
1293 item->q = q;
1294 item->binder_type = binder_type;
1295 list_add(&item->list, &block->owner_list);
1296 return 0;
1297}
1298
1299static void tcf_block_owner_del(struct tcf_block *block,
1300 struct Qdisc *q,
1301 enum tcf_block_binder_type binder_type)
1302{
1303 struct tcf_block_owner_item *item;
1304
1305 list_for_each_entry(item, &block->owner_list, list) {
1306 if (item->q == q && item->binder_type == binder_type) {
1307 list_del(&item->list);
1308 kfree(item);
1309 return;
1310 }
1311 }
1312 WARN_ON(1);
1313}
1314
Jiri Pirko48617382018-01-17 11:46:46 +01001315int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1316 struct tcf_block_ext_info *ei,
1317 struct netlink_ext_ack *extack)
1318{
1319 struct net *net = qdisc_net(q);
1320 struct tcf_block *block = NULL;
Jiri Pirko48617382018-01-17 11:46:46 +01001321 int err;
1322
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001323 if (ei->block_index)
Jiri Pirko48617382018-01-17 11:46:46 +01001324 /* block_index not 0 means the shared block is requested */
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001325 block = tcf_block_refcnt_get(net, ei->block_index);
Jiri Pirko48617382018-01-17 11:46:46 +01001326
1327 if (!block) {
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001328 block = tcf_block_create(net, q, ei->block_index, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001329 if (IS_ERR(block))
1330 return PTR_ERR(block);
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001331 if (tcf_block_shared(block)) {
1332 err = tcf_block_insert(block, net, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001333 if (err)
1334 goto err_block_insert;
1335 }
1336 }
1337
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001338 err = tcf_block_owner_add(block, q, ei->binder_type);
1339 if (err)
1340 goto err_block_owner_add;
1341
1342 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1343
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001344 err = tcf_chain0_head_change_cb_add(block, ei, extack);
Jiri Pirkoa9b19442018-01-17 11:46:45 +01001345 if (err)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001346 goto err_chain0_head_change_cb_add;
Jiri Pirkocaa72602018-01-17 11:46:50 +01001347
John Hurley60513bd2018-06-25 14:30:04 -07001348 err = tcf_block_offload_bind(block, q, ei, extack);
Jiri Pirkocaa72602018-01-17 11:46:50 +01001349 if (err)
1350 goto err_block_offload_bind;
1351
Jiri Pirko6529eab2017-05-17 11:07:55 +02001352 *p_block = block;
1353 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001354
Jiri Pirkocaa72602018-01-17 11:46:50 +01001355err_block_offload_bind:
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001356 tcf_chain0_head_change_cb_del(block, ei);
1357err_chain0_head_change_cb_add:
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001358 tcf_block_owner_del(block, q, ei->binder_type);
1359err_block_owner_add:
Jiri Pirko48617382018-01-17 11:46:46 +01001360err_block_insert:
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001361 tcf_block_refcnt_put(block);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001362 return err;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001363}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001364EXPORT_SYMBOL(tcf_block_get_ext);
1365
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001366static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1367{
1368 struct tcf_proto __rcu **p_filter_chain = priv;
1369
1370 rcu_assign_pointer(*p_filter_chain, tp_head);
1371}
1372
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001373int tcf_block_get(struct tcf_block **p_block,
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001374 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1375 struct netlink_ext_ack *extack)
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001376{
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001377 struct tcf_block_ext_info ei = {
1378 .chain_head_change = tcf_chain_head_change_dflt,
1379 .chain_head_change_priv = p_filter_chain,
1380 };
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001381
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001382 WARN_ON(!p_filter_chain);
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001383 return tcf_block_get_ext(p_block, q, &ei, extack);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001384}
Jiri Pirko6529eab2017-05-17 11:07:55 +02001385EXPORT_SYMBOL(tcf_block_get);
1386
Cong Wang7aa00452017-10-26 18:24:28 -07001387/* XXX: Standalone actions are not allowed to jump to any chain, and bound
Roman Kapla60b3f52017-11-24 12:27:58 +01001388 * actions should be all removed after flushing.
Cong Wang7aa00452017-10-26 18:24:28 -07001389 */
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001390void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
David S. Millere1ea2f92017-10-30 14:10:01 +09001391 struct tcf_block_ext_info *ei)
Cong Wang7aa00452017-10-26 18:24:28 -07001392{
David S. Millerc30abd52017-12-16 22:11:55 -05001393 if (!block)
1394 return;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001395 tcf_chain0_head_change_cb_del(block, ei);
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001396 tcf_block_owner_del(block, q, ei->binder_type);
Roman Kapla60b3f52017-11-24 12:27:58 +01001397
Vlad Buslov0607e432018-09-24 19:22:57 +03001398 __tcf_block_put(block, q, ei);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001399}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001400EXPORT_SYMBOL(tcf_block_put_ext);
1401
1402void tcf_block_put(struct tcf_block *block)
1403{
1404 struct tcf_block_ext_info ei = {0, };
1405
Jiri Pirko4853f122017-12-21 13:13:59 +01001406 if (!block)
1407 return;
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001408 tcf_block_put_ext(block, block->q, &ei);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001409}
David S. Millere1ea2f92017-10-30 14:10:01 +09001410
Jiri Pirko6529eab2017-05-17 11:07:55 +02001411EXPORT_SYMBOL(tcf_block_put);
Jiri Pirkocf1facd2017-02-09 14:38:56 +01001412
Jiri Pirkoacb67442017-10-19 15:50:31 +02001413struct tcf_block_cb {
1414 struct list_head list;
1415 tc_setup_cb_t *cb;
1416 void *cb_ident;
1417 void *cb_priv;
1418 unsigned int refcnt;
1419};
1420
1421void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
1422{
1423 return block_cb->cb_priv;
1424}
1425EXPORT_SYMBOL(tcf_block_cb_priv);
1426
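/* Look up a registered block callback by callback function and identity
 * cookie. Returns NULL if no matching callback is registered on this block.
 */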
1427struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
1428 tc_setup_cb_t *cb, void *cb_ident)
1429{
	struct tcf_block_cb *block_cb;
1430
1431 list_for_each_entry(block_cb, &block->cb_list, list)
1432 if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
1433 return block_cb;
1434 return NULL;
1435}
1436EXPORT_SYMBOL(tcf_block_cb_lookup);
1437
1438void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
1439{
1440 block_cb->refcnt++;
1441}
1442EXPORT_SYMBOL(tcf_block_cb_incref);
1443
1444unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
1445{
1446 return --block_cb->refcnt;
1447}
1448EXPORT_SYMBOL(tcf_block_cb_decref);
1449
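/* Walk every classifier on every chain of the block and replay its rules to
 * the given callback: add == true installs them, add == false removes them.
 * If installation fails part way through, the already replayed rules are
 * removed again. Classifiers without ->reoffload support fail the replay
 * with -EOPNOTSUPP when the block already has offloads in use.
 */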
John Hurley32636742018-06-25 14:30:10 -07001450static int
1451tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
1452 void *cb_priv, bool add, bool offload_in_use,
1453 struct netlink_ext_ack *extack)
1454{
Vlad Buslovbbf73832019-02-11 10:55:36 +02001455 struct tcf_chain *chain, *chain_prev;
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001456 struct tcf_proto *tp, *tp_prev;
John Hurley32636742018-06-25 14:30:10 -07001457 int err;
1458
Vlad Buslovbbf73832019-02-11 10:55:36 +02001459 for (chain = __tcf_get_next_chain(block, NULL);
1460 chain;
1461 chain_prev = chain,
1462 chain = __tcf_get_next_chain(block, chain),
1463 tcf_chain_put(chain_prev)) {
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001464 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1465 tp_prev = tp,
1466 tp = __tcf_get_next_proto(chain, tp),
1467 tcf_proto_put(tp_prev, NULL)) {
John Hurley32636742018-06-25 14:30:10 -07001468 if (tp->ops->reoffload) {
1469 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1470 extack);
1471 if (err && add)
1472 goto err_playback_remove;
1473 } else if (add && offload_in_use) {
1474 err = -EOPNOTSUPP;
1475 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1476 goto err_playback_remove;
1477 }
1478 }
1479 }
1480
1481 return 0;
1482
1483err_playback_remove:
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001484 tcf_proto_put(tp, NULL);
Vlad Buslovbbf73832019-02-11 10:55:36 +02001485 tcf_chain_put(chain);
John Hurley32636742018-06-25 14:30:10 -07001486 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1487 extack);
1488 return err;
1489}
1490
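/* Register a new block callback. All rules currently present on the block
 * are replayed to the callback first; if that fails, the callback is not
 * registered and an ERR_PTR is returned.
 */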
Jiri Pirkoacb67442017-10-19 15:50:31 +02001491struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
1492 tc_setup_cb_t *cb, void *cb_ident,
John Hurley60513bd2018-06-25 14:30:04 -07001493 void *cb_priv,
1494 struct netlink_ext_ack *extack)
Jiri Pirkoacb67442017-10-19 15:50:31 +02001495{
1496 struct tcf_block_cb *block_cb;
John Hurley32636742018-06-25 14:30:10 -07001497 int err;
Jiri Pirkoacb67442017-10-19 15:50:31 +02001498
John Hurley32636742018-06-25 14:30:10 -07001499 /* Replay any already present rules */
1500 err = tcf_block_playback_offloads(block, cb, cb_priv, true,
1501 tcf_block_offload_in_use(block),
1502 extack);
1503 if (err)
1504 return ERR_PTR(err);
Jiri Pirkocaa72602018-01-17 11:46:50 +01001505
Jiri Pirkoacb67442017-10-19 15:50:31 +02001506 block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
1507 if (!block_cb)
Jiri Pirkocaa72602018-01-17 11:46:50 +01001508 return ERR_PTR(-ENOMEM);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001509 block_cb->cb = cb;
1510 block_cb->cb_ident = cb_ident;
1511 block_cb->cb_priv = cb_priv;
1512 list_add(&block_cb->list, &block->cb_list);
1513 return block_cb;
1514}
1515EXPORT_SYMBOL(__tcf_block_cb_register);
1516
1517int tcf_block_cb_register(struct tcf_block *block,
1518 tc_setup_cb_t *cb, void *cb_ident,
John Hurley60513bd2018-06-25 14:30:04 -07001519 void *cb_priv, struct netlink_ext_ack *extack)
Jiri Pirkoacb67442017-10-19 15:50:31 +02001520{
1521 struct tcf_block_cb *block_cb;
1522
John Hurley60513bd2018-06-25 14:30:04 -07001523 block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
1524 extack);
Gustavo A. R. Silvabaa2d2b2018-07-18 23:14:17 -05001525 return PTR_ERR_OR_ZERO(block_cb);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001526}
1527EXPORT_SYMBOL(tcf_block_cb_register);
1528
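/* Unregister a block callback and remove the rules that were previously
 * replayed to it.
 */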
John Hurley32636742018-06-25 14:30:10 -07001529void __tcf_block_cb_unregister(struct tcf_block *block,
1530 struct tcf_block_cb *block_cb)
Jiri Pirkoacb67442017-10-19 15:50:31 +02001531{
John Hurley32636742018-06-25 14:30:10 -07001532 tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
1533 false, tcf_block_offload_in_use(block),
1534 NULL);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001535 list_del(&block_cb->list);
1536 kfree(block_cb);
1537}
1538EXPORT_SYMBOL(__tcf_block_cb_unregister);
1539
1540void tcf_block_cb_unregister(struct tcf_block *block,
1541 tc_setup_cb_t *cb, void *cb_ident)
1542{
1543 struct tcf_block_cb *block_cb;
1544
1545 block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
1546 if (!block_cb)
1547 return;
John Hurley32636742018-06-25 14:30:10 -07001548 __tcf_block_cb_unregister(block, block_cb);
Jiri Pirkoacb67442017-10-19 15:50:31 +02001549}
1550EXPORT_SYMBOL(tcf_block_cb_unregister);
1551
Jiri Pirko87d83092017-05-17 11:07:54 +02001552/* Main classifier routine: scans classifier chain attached
1553 * to this qdisc, (optionally) tests for protocol and asks
1554 * specific classifiers.
1555 */
1556int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1557 struct tcf_result *res, bool compat_mode)
1558{
Jiri Pirko87d83092017-05-17 11:07:54 +02001559#ifdef CONFIG_NET_CLS_ACT
1560 const int max_reclassify_loop = 4;
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001561 const struct tcf_proto *orig_tp = tp;
1562 const struct tcf_proto *first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001563 int limit = 0;
1564
1565reclassify:
1566#endif
1567 for (; tp; tp = rcu_dereference_bh(tp->next)) {
Cong Wangcd0c4e72019-01-11 18:55:42 -08001568 __be16 protocol = tc_skb_protocol(skb);
Jiri Pirko87d83092017-05-17 11:07:54 +02001569 int err;
1570
1571 if (tp->protocol != protocol &&
1572 tp->protocol != htons(ETH_P_ALL))
1573 continue;
1574
1575 err = tp->classify(skb, tp, res);
1576#ifdef CONFIG_NET_CLS_ACT
Jiri Pirkodb505142017-05-17 11:08:03 +02001577 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001578 first_tp = orig_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001579 goto reset;
Jiri Pirkodb505142017-05-17 11:08:03 +02001580 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001581 first_tp = res->goto_tp;
Jiri Pirkodb505142017-05-17 11:08:03 +02001582 goto reset;
1583 }
Jiri Pirko87d83092017-05-17 11:07:54 +02001584#endif
1585 if (err >= 0)
1586 return err;
1587 }
1588
1589 return TC_ACT_UNSPEC; /* signal: continue lookup */
1590#ifdef CONFIG_NET_CLS_ACT
1591reset:
1592 if (unlikely(limit++ >= max_reclassify_loop)) {
Jiri Pirko9d3aaff2018-01-17 11:46:47 +01001593 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1594 tp->chain->block->index,
1595 tp->prio & 0xffff,
Jiri Pirko87d83092017-05-17 11:07:54 +02001596 ntohs(tp->protocol));
1597 return TC_ACT_SHOT;
1598 }
1599
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001600 tp = first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001601 goto reclassify;
1602#endif
1603}
1604EXPORT_SYMBOL(tcf_classify);
1605
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001606struct tcf_chain_info {
1607 struct tcf_proto __rcu **pprev;
1608 struct tcf_proto __rcu *next;
1609};
1610
Vlad Busloved76f5e2019-02-11 10:55:38 +02001611static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1612 struct tcf_chain_info *chain_info)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001613{
Vlad Busloved76f5e2019-02-11 10:55:38 +02001614 return tcf_chain_dereference(*chain_info->pprev, chain);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001615}
1616
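/* Link tp into the chain at the position recorded in chain_info. Takes a
 * reference to tp on success and fails with -EAGAIN while the chain is
 * being flushed.
 */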
Vlad Buslov726d06122019-02-11 10:55:42 +02001617static int tcf_chain_tp_insert(struct tcf_chain *chain,
1618 struct tcf_chain_info *chain_info,
1619 struct tcf_proto *tp)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001620{
Vlad Buslov726d06122019-02-11 10:55:42 +02001621 if (chain->flushing)
1622 return -EAGAIN;
1623
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001624 if (*chain_info->pprev == chain->filter_chain)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001625 tcf_chain0_head_change(chain, tp);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001626 tcf_proto_get(tp);
Vlad Busloved76f5e2019-02-11 10:55:38 +02001627 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001628 rcu_assign_pointer(*chain_info->pprev, tp);
Vlad Buslov726d06122019-02-11 10:55:42 +02001629
1630 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001631}
1632
1633static void tcf_chain_tp_remove(struct tcf_chain *chain,
1634 struct tcf_chain_info *chain_info,
1635 struct tcf_proto *tp)
1636{
Vlad Busloved76f5e2019-02-11 10:55:38 +02001637 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001638
Vlad Buslov8b646782019-02-11 10:55:41 +02001639 tcf_proto_mark_delete(tp);
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001640 if (tp == chain->filter_chain)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001641 tcf_chain0_head_change(chain, next);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001642 RCU_INIT_POINTER(*chain_info->pprev, next);
1643}
1644
1645static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1646 struct tcf_chain_info *chain_info,
1647 u32 protocol, u32 prio,
Vlad Buslov8b646782019-02-11 10:55:41 +02001648 bool prio_allocate);
1649
1650/* Try to insert new proto.
1651 * If proto with specified priority already exists, free new proto
1652 * and return existing one.
1653 */
1654
1655static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1656 struct tcf_proto *tp_new,
1657 u32 protocol, u32 prio)
1658{
1659 struct tcf_chain_info chain_info;
1660 struct tcf_proto *tp;
Vlad Buslov726d06122019-02-11 10:55:42 +02001661 int err = 0;
Vlad Buslov8b646782019-02-11 10:55:41 +02001662
1663 mutex_lock(&chain->filter_chain_lock);
1664
1665 tp = tcf_chain_tp_find(chain, &chain_info,
1666 protocol, prio, false);
1667 if (!tp)
Vlad Buslov726d06122019-02-11 10:55:42 +02001668 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
Vlad Buslov8b646782019-02-11 10:55:41 +02001669 mutex_unlock(&chain->filter_chain_lock);
1670
1671 if (tp) {
1672 tcf_proto_destroy(tp_new, NULL);
1673 tp_new = tp;
Vlad Buslov726d06122019-02-11 10:55:42 +02001674 } else if (err) {
1675 tcf_proto_destroy(tp_new, NULL);
1676 tp_new = ERR_PTR(err);
Vlad Buslov8b646782019-02-11 10:55:41 +02001677 }
1678
1679 return tp_new;
1680}
1681
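/* Unlink tp from its chain if it has become empty. Lookup and unlink are
 * done under the chain's filter lock, so a filter inserted concurrently
 * keeps the classifier alive.
 */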
1682static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1683 struct tcf_proto *tp,
1684 struct netlink_ext_ack *extack)
1685{
1686 struct tcf_chain_info chain_info;
1687 struct tcf_proto *tp_iter;
1688 struct tcf_proto **pprev;
1689 struct tcf_proto *next;
1690
1691 mutex_lock(&chain->filter_chain_lock);
1692
1693 /* Atomically find and remove tp from chain. */
1694 for (pprev = &chain->filter_chain;
1695 (tp_iter = tcf_chain_dereference(*pprev, chain));
1696 pprev = &tp_iter->next) {
1697 if (tp_iter == tp) {
1698 chain_info.pprev = pprev;
1699 chain_info.next = tp_iter->next;
1700 WARN_ON(tp_iter->deleting);
1701 break;
1702 }
1703 }
1704 /* Verify that tp still exists and no new filters were inserted
1705 * concurrently.
1706 * Mark tp for deletion if it is empty.
1707 */
1708 if (!tp_iter || !tcf_proto_check_delete(tp)) {
1709 mutex_unlock(&chain->filter_chain_lock);
1710 return;
1711 }
1712
1713 next = tcf_chain_dereference(chain_info.next, chain);
1714 if (tp == chain->filter_chain)
1715 tcf_chain0_head_change(chain, next);
1716 RCU_INIT_POINTER(*chain_info.pprev, next);
1717 mutex_unlock(&chain->filter_chain_lock);
1718
1719 tcf_proto_put(tp, extack);
1720}
1721
1722static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1723 struct tcf_chain_info *chain_info,
1724 u32 protocol, u32 prio,
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001725 bool prio_allocate)
1726{
1727 struct tcf_proto **pprev;
1728 struct tcf_proto *tp;
1729
1730 /* Check the chain for existence of proto-tcf with this priority */
1731 for (pprev = &chain->filter_chain;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001732 (tp = tcf_chain_dereference(*pprev, chain));
1733 pprev = &tp->next) {
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001734 if (tp->prio >= prio) {
1735 if (tp->prio == prio) {
1736 if (prio_allocate ||
1737 (tp->protocol != protocol && protocol))
1738 return ERR_PTR(-EINVAL);
1739 } else {
1740 tp = NULL;
1741 }
1742 break;
1743 }
1744 }
1745 chain_info->pprev = pprev;
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001746 if (tp) {
1747 chain_info->next = tp->next;
1748 tcf_proto_get(tp);
1749 } else {
1750 chain_info->next = NULL;
1751 }
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001752 return tp;
1753}
1754
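/* Build a netlink message describing a single filter. When q is set the
 * qdisc's ifindex and parent are reported; for a shared block the ifindex
 * is TCM_IFINDEX_MAGIC_BLOCK and the block index is used instead.
 */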
WANG Cong71203712017-08-07 15:26:50 -07001755static int tcf_fill_node(struct net *net, struct sk_buff *skb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001756 struct tcf_proto *tp, struct tcf_block *block,
1757 struct Qdisc *q, u32 parent, void *fh,
1758 u32 portid, u32 seq, u16 flags, int event)
WANG Cong71203712017-08-07 15:26:50 -07001759{
1760 struct tcmsg *tcm;
1761 struct nlmsghdr *nlh;
1762 unsigned char *b = skb_tail_pointer(skb);
1763
1764 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1765 if (!nlh)
1766 goto out_nlmsg_trim;
1767 tcm = nlmsg_data(nlh);
1768 tcm->tcm_family = AF_UNSPEC;
1769 tcm->tcm__pad1 = 0;
1770 tcm->tcm__pad2 = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001771 if (q) {
1772 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1773 tcm->tcm_parent = parent;
1774 } else {
1775 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1776 tcm->tcm_block_index = block->index;
1777 }
WANG Cong71203712017-08-07 15:26:50 -07001778 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1779 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1780 goto nla_put_failure;
1781 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1782 goto nla_put_failure;
1783 if (!fh) {
1784 tcm->tcm_handle = 0;
1785 } else {
1786 if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
1787 goto nla_put_failure;
1788 }
1789 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1790 return skb->len;
1791
1792out_nlmsg_trim:
1793nla_put_failure:
1794 nlmsg_trim(skb, b);
1795 return -1;
1796}
1797
1798static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1799 struct nlmsghdr *n, struct tcf_proto *tp,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001800 struct tcf_block *block, struct Qdisc *q,
1801 u32 parent, void *fh, int event, bool unicast)
WANG Cong71203712017-08-07 15:26:50 -07001802{
1803 struct sk_buff *skb;
1804 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1805
1806 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1807 if (!skb)
1808 return -ENOBUFS;
1809
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001810 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1811 n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
WANG Cong71203712017-08-07 15:26:50 -07001812 kfree_skb(skb);
1813 return -EINVAL;
1814 }
1815
1816 if (unicast)
1817 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1818
1819 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1820 n->nlmsg_flags & NLM_F_ECHO);
1821}
1822
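/* Build the RTM_DELTFILTER notification before calling ->delete(), while
 * the filter can still be dumped, and send it only if the delete succeeds.
 */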
1823static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1824 struct nlmsghdr *n, struct tcf_proto *tp,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001825 struct tcf_block *block, struct Qdisc *q,
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001826 u32 parent, void *fh, bool unicast, bool *last,
1827 struct netlink_ext_ack *extack)
WANG Cong71203712017-08-07 15:26:50 -07001828{
1829 struct sk_buff *skb;
1830 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1831 int err;
1832
1833 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1834 if (!skb)
1835 return -ENOBUFS;
1836
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001837 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1838 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001839 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
WANG Cong71203712017-08-07 15:26:50 -07001840 kfree_skb(skb);
1841 return -EINVAL;
1842 }
1843
Alexander Aring571acf22018-01-18 11:20:53 -05001844 err = tp->ops->delete(tp, fh, last, extack);
WANG Cong71203712017-08-07 15:26:50 -07001845 if (err) {
1846 kfree_skb(skb);
1847 return err;
1848 }
1849
1850 if (unicast)
1851 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1852
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001853 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1854 n->nlmsg_flags & NLM_F_ECHO);
1855 if (err < 0)
1856 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1857 return err;
WANG Cong71203712017-08-07 15:26:50 -07001858}
1859
1860static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001861 struct tcf_block *block, struct Qdisc *q,
1862 u32 parent, struct nlmsghdr *n,
WANG Cong71203712017-08-07 15:26:50 -07001863 struct tcf_chain *chain, int event)
1864{
1865 struct tcf_proto *tp;
1866
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001867 for (tp = tcf_get_next_proto(chain, NULL);
1868 tp; tp = tcf_get_next_proto(chain, tp))
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001869 tfilter_notify(net, oskb, n, tp, block,
YueHaibing53189182018-07-17 20:58:14 +08001870 q, parent, NULL, event, false);
WANG Cong71203712017-08-07 15:26:50 -07001871}
1872
Vlad Buslov7d5509f2019-02-11 10:55:44 +02001873static void tfilter_put(struct tcf_proto *tp, void *fh)
1874{
1875 if (tp->ops->put && fh)
1876 tp->ops->put(tp, fh);
1877}
1878
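/* RTM_NEWTFILTER handler: find or create the chain and the classifier
 * instance for the requested protocol/priority and pass the attributes to
 * the classifier's ->change() callback. -EAGAIN causes the whole request
 * to be replayed.
 */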
Vlad Buslovc431f892018-05-31 09:52:53 +03001879static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
David Ahernc21ef3e2017-04-16 09:48:24 -07001880 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001882 struct net *net = sock_net(skb->sk);
Patrick McHardyadd93b62008-01-22 22:11:33 -08001883 struct nlattr *tca[TCA_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 struct tcmsg *t;
1885 u32 protocol;
1886 u32 prio;
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001887 bool prio_allocate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001889 u32 chain_index;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001890 struct Qdisc *q = NULL;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001891 struct tcf_chain_info chain_info;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001892 struct tcf_chain *chain = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001893 struct tcf_block *block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 struct tcf_proto *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 unsigned long cl;
WANG Cong8113c092017-08-04 21:31:43 -07001896 void *fh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 int err;
Daniel Borkmann628185c2016-12-21 18:04:11 +01001898 int tp_created;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
Vlad Buslovc431f892018-05-31 09:52:53 +03001900 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
Eric W. Biedermandfc47ef2012-11-16 03:03:00 +00001901 return -EPERM;
Hong zhi guode179c82013-03-25 17:36:33 +00001902
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903replay:
Daniel Borkmann628185c2016-12-21 18:04:11 +01001904 tp_created = 0;
1905
Davide Carattie3314732018-10-10 22:00:58 +02001906 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Hong zhi guode179c82013-03-25 17:36:33 +00001907 if (err < 0)
1908 return err;
1909
David S. Miller942b8162012-06-26 21:48:50 -07001910 t = nlmsg_data(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 protocol = TC_H_MIN(t->tcm_info);
1912 prio = TC_H_MAJ(t->tcm_info);
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001913 prio_allocate = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 parent = t->tcm_parent;
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001915 tp = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 cl = 0;
1917
1918 if (prio == 0) {
Vlad Buslovc431f892018-05-31 09:52:53 +03001919 /* If no priority is provided by the user,
1920 * we allocate one.
1921 */
1922 if (n->nlmsg_flags & NLM_F_CREATE) {
1923 prio = TC_H_MAKE(0x80000000U, 0U);
1924 prio_allocate = true;
1925 } else {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001926 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 return -ENOENT;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001928 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 }
1930
1931 /* Find head of filter chain. */
1932
Vlad Buslovc431f892018-05-31 09:52:53 +03001933 block = tcf_block_find(net, &q, &parent, &cl,
1934 t->tcm_ifindex, t->tcm_block_index, extack);
1935 if (IS_ERR(block)) {
1936 err = PTR_ERR(block);
1937 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001938 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02001939
1940 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1941 if (chain_index > TC_ACT_EXT_VAL_MASK) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001942 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Jiri Pirko5bc17012017-05-17 11:08:01 +02001943 err = -EINVAL;
1944 goto errout;
1945 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001946 chain = tcf_chain_get(block, chain_index, true);
Jiri Pirko5bc17012017-05-17 11:08:01 +02001947 if (!chain) {
Jiri Pirkod5ed72a2018-08-27 20:58:43 +02001948 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
Vlad Buslovc431f892018-05-31 09:52:53 +03001949 err = -ENOMEM;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001950 goto errout;
1951 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952
Vlad Busloved76f5e2019-02-11 10:55:38 +02001953 mutex_lock(&chain->filter_chain_lock);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001954 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1955 prio, prio_allocate);
1956 if (IS_ERR(tp)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001957 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001958 err = PTR_ERR(tp);
Vlad Busloved76f5e2019-02-11 10:55:38 +02001959 goto errout_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 }
1961
1962 if (tp == NULL) {
Vlad Buslov8b646782019-02-11 10:55:41 +02001963 struct tcf_proto *tp_new = NULL;
1964
Vlad Buslov726d06122019-02-11 10:55:42 +02001965 if (chain->flushing) {
1966 err = -EAGAIN;
1967 goto errout_locked;
1968 }
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 /* Proto-tcf does not exist, create new one */
1971
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001972 if (tca[TCA_KIND] == NULL || !protocol) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001973 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001974 err = -EINVAL;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001975 goto errout_locked;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001976 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
Vlad Buslovc431f892018-05-31 09:52:53 +03001978 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001979 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001980 err = -ENOENT;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001981 goto errout_locked;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001982 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001984 if (prio_allocate)
Vlad Busloved76f5e2019-02-11 10:55:38 +02001985 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
1986 &chain_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
Vlad Busloved76f5e2019-02-11 10:55:38 +02001988 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslov8b646782019-02-11 10:55:41 +02001989 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
1990 protocol, prio, chain, extack);
1991 if (IS_ERR(tp_new)) {
1992 err = PTR_ERR(tp_new);
Vlad Buslov726d06122019-02-11 10:55:42 +02001993 goto errout_tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02001995
Minoru Usui12186be2009-06-02 02:17:34 -07001996 tp_created = 1;
Vlad Buslov8b646782019-02-11 10:55:41 +02001997 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio);
Vlad Buslov726d06122019-02-11 10:55:42 +02001998 if (IS_ERR(tp)) {
1999 err = PTR_ERR(tp);
2000 goto errout_tp;
2001 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02002002 } else {
2003 mutex_unlock(&chain->filter_chain_lock);
Jiri Pirko6bb16e72017-02-09 14:38:58 +01002004 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
Vlad Buslov8b646782019-02-11 10:55:41 +02002006 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2007 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2008 err = -EINVAL;
2009 goto errout;
2010 }
2011
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 fh = tp->ops->get(tp, t->tcm_handle);
2013
WANG Cong8113c092017-08-04 21:31:43 -07002014 if (!fh) {
Vlad Buslovc431f892018-05-31 09:52:53 +03002015 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05002016 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01002017 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01002019 }
Vlad Buslovc431f892018-05-31 09:52:53 +03002020 } else if (n->nlmsg_flags & NLM_F_EXCL) {
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002021 tfilter_put(tp, fh);
Vlad Buslovc431f892018-05-31 09:52:53 +03002022 NL_SET_ERR_MSG(extack, "Filter already exists");
2023 err = -EEXIST;
2024 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 }
2026
Jiri Pirko9f407f12018-07-23 09:23:07 +02002027 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2028 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2029 err = -EINVAL;
2030 goto errout;
2031 }
2032
Cong Wang2f7ef2f2014-04-25 13:54:06 -07002033 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
Alexander Aring7306db32018-01-18 11:20:51 -05002034 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2035 extack);
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002036 if (err == 0) {
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002037 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002038 RTM_NEWTFILTER, false);
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002039 tfilter_put(tp, fh);
2040 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042errout:
Vlad Buslov8b646782019-02-11 10:55:41 +02002043 if (err && tp_created)
2044 tcf_chain_tp_delete_empty(chain, tp, NULL);
Vlad Buslov726d06122019-02-11 10:55:42 +02002045errout_tp:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002046 if (chain) {
2047 if (tp && !IS_ERR(tp))
2048 tcf_proto_put(tp, NULL);
2049 if (!tp_created)
2050 tcf_chain_put(chain);
2051 }
Vlad Buslove368fdb2018-09-24 19:22:53 +03002052 tcf_block_release(q, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 if (err == -EAGAIN)
2054 /* Replay the request. */
2055 goto replay;
2056 return err;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002057
2058errout_locked:
2059 mutex_unlock(&chain->filter_chain_lock);
2060 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061}
2062
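/* RTM_DELTFILTER handler: priority 0 flushes the whole chain, a zero
 * handle removes the classifier instance, otherwise a single filter is
 * deleted through the classifier's ->delete() callback.
 */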
Vlad Buslovc431f892018-05-31 09:52:53 +03002063static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2064 struct netlink_ext_ack *extack)
2065{
2066 struct net *net = sock_net(skb->sk);
2067 struct nlattr *tca[TCA_MAX + 1];
2068 struct tcmsg *t;
2069 u32 protocol;
2070 u32 prio;
2071 u32 parent;
2072 u32 chain_index;
2073 struct Qdisc *q = NULL;
2074 struct tcf_chain_info chain_info;
2075 struct tcf_chain *chain = NULL;
2076 struct tcf_block *block;
2077 struct tcf_proto *tp = NULL;
2078 unsigned long cl = 0;
2079 void *fh = NULL;
2080 int err;
2081
2082 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2083 return -EPERM;
2084
Davide Carattie3314732018-10-10 22:00:58 +02002085 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002086 if (err < 0)
2087 return err;
2088
2089 t = nlmsg_data(n);
2090 protocol = TC_H_MIN(t->tcm_info);
2091 prio = TC_H_MAJ(t->tcm_info);
2092 parent = t->tcm_parent;
2093
2094 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2095 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2096 return -ENOENT;
2097 }
2098
2099 /* Find head of filter chain. */
2100
2101 block = tcf_block_find(net, &q, &parent, &cl,
2102 t->tcm_ifindex, t->tcm_block_index, extack);
2103 if (IS_ERR(block)) {
2104 err = PTR_ERR(block);
2105 goto errout;
2106 }
2107
2108 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2109 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2110 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2111 err = -EINVAL;
2112 goto errout;
2113 }
2114 chain = tcf_chain_get(block, chain_index, false);
2115 if (!chain) {
Jiri Pirko5ca8a252018-08-03 11:08:47 +02002116 /* User requested flush on non-existent chain. Nothing to do,
2117 * so just return success.
2118 */
2119 if (prio == 0) {
2120 err = 0;
2121 goto errout;
2122 }
Vlad Buslovc431f892018-05-31 09:52:53 +03002123 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Jiri Pirkob7b42472018-08-27 20:58:44 +02002124 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002125 goto errout;
2126 }
2127
2128 if (prio == 0) {
2129 tfilter_notify_chain(net, skb, block, q, parent, n,
2130 chain, RTM_DELTFILTER);
2131 tcf_chain_flush(chain);
2132 err = 0;
2133 goto errout;
2134 }
2135
Vlad Busloved76f5e2019-02-11 10:55:38 +02002136 mutex_lock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002137 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2138 prio, false);
2139 if (!tp || IS_ERR(tp)) {
2140 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03002141 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002142 goto errout_locked;
Vlad Buslovc431f892018-05-31 09:52:53 +03002143 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2144 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2145 err = -EINVAL;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002146 goto errout_locked;
2147 } else if (t->tcm_handle == 0) {
2148 tcf_chain_tp_remove(chain, &chain_info, tp);
2149 mutex_unlock(&chain->filter_chain_lock);
2150
Vlad Buslov8b646782019-02-11 10:55:41 +02002151 tcf_proto_put(tp, NULL);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002152 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2153 RTM_DELTFILTER, false);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002154 err = 0;
Vlad Buslovc431f892018-05-31 09:52:53 +03002155 goto errout;
2156 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02002157 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002158
2159 fh = tp->ops->get(tp, t->tcm_handle);
2160
2161 if (!fh) {
Vlad Busloved76f5e2019-02-11 10:55:38 +02002162 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2163 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002164 } else {
2165 bool last;
2166
2167 err = tfilter_del_notify(net, skb, n, tp, block,
2168 q, parent, fh, false, &last,
2169 extack);
2170 if (err)
2171 goto errout;
Vlad Buslov8b646782019-02-11 10:55:41 +02002172 if (last)
2173 tcf_chain_tp_delete_empty(chain, tp, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002174 }
2175
2176errout:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002177 if (chain) {
2178 if (tp && !IS_ERR(tp))
2179 tcf_proto_put(tp, NULL);
Vlad Buslovc431f892018-05-31 09:52:53 +03002180 tcf_chain_put(chain);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002181 }
Vlad Buslove368fdb2018-09-24 19:22:53 +03002182 tcf_block_release(q, block);
Vlad Buslovc431f892018-05-31 09:52:53 +03002183 return err;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002184
2185errout_locked:
2186 mutex_unlock(&chain->filter_chain_lock);
2187 goto errout;
Vlad Buslovc431f892018-05-31 09:52:53 +03002188}
2189
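/* RTM_GETTFILTER handler: look up a single filter and unicast an
 * RTM_NEWTFILTER message describing it back to the requester.
 */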
2190static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2191 struct netlink_ext_ack *extack)
2192{
2193 struct net *net = sock_net(skb->sk);
2194 struct nlattr *tca[TCA_MAX + 1];
2195 struct tcmsg *t;
2196 u32 protocol;
2197 u32 prio;
2198 u32 parent;
2199 u32 chain_index;
2200 struct Qdisc *q = NULL;
2201 struct tcf_chain_info chain_info;
2202 struct tcf_chain *chain = NULL;
2203 struct tcf_block *block;
2204 struct tcf_proto *tp = NULL;
2205 unsigned long cl = 0;
2206 void *fh = NULL;
2207 int err;
2208
Davide Carattie3314732018-10-10 22:00:58 +02002209 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002210 if (err < 0)
2211 return err;
2212
2213 t = nlmsg_data(n);
2214 protocol = TC_H_MIN(t->tcm_info);
2215 prio = TC_H_MAJ(t->tcm_info);
2216 parent = t->tcm_parent;
2217
2218 if (prio == 0) {
2219 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2220 return -ENOENT;
2221 }
2222
2223 /* Find head of filter chain. */
2224
2225 block = tcf_block_find(net, &q, &parent, &cl,
2226 t->tcm_ifindex, t->tcm_block_index, extack);
2227 if (IS_ERR(block)) {
2228 err = PTR_ERR(block);
2229 goto errout;
2230 }
2231
2232 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2233 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2234 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2235 err = -EINVAL;
2236 goto errout;
2237 }
2238 chain = tcf_chain_get(block, chain_index, false);
2239 if (!chain) {
2240 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2241 err = -EINVAL;
2242 goto errout;
2243 }
2244
Vlad Busloved76f5e2019-02-11 10:55:38 +02002245 mutex_lock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002246 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2247 prio, false);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002248 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002249 if (!tp || IS_ERR(tp)) {
2250 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03002251 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002252 goto errout;
2253 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2254 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2255 err = -EINVAL;
2256 goto errout;
2257 }
2258
2259 fh = tp->ops->get(tp, t->tcm_handle);
2260
2261 if (!fh) {
2262 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2263 err = -ENOENT;
2264 } else {
2265 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2266 fh, RTM_NEWTFILTER, true);
2267 if (err < 0)
2268 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2269 }
2270
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002271 tfilter_put(tp, fh);
Vlad Buslovc431f892018-05-31 09:52:53 +03002272errout:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002273 if (chain) {
2274 if (tp && !IS_ERR(tp))
2275 tcf_proto_put(tp, NULL);
Vlad Buslovc431f892018-05-31 09:52:53 +03002276 tcf_chain_put(chain);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002277 }
Vlad Buslove368fdb2018-09-24 19:22:53 +03002278 tcf_block_release(q, block);
Vlad Buslovc431f892018-05-31 09:52:53 +03002279 return err;
2280}
2281
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002282struct tcf_dump_args {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 struct tcf_walker w;
2284 struct sk_buff *skb;
2285 struct netlink_callback *cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002286 struct tcf_block *block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002287 struct Qdisc *q;
2288 u32 parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289};
2290
WANG Cong8113c092017-08-04 21:31:43 -07002291static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292{
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002293 struct tcf_dump_args *a = (void *)arg;
WANG Cong832d1d52014-01-09 16:14:01 -08002294 struct net *net = sock_net(a->skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002296 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002297 n, NETLINK_CB(a->cb->skb).portid,
Jamal Hadi Salim5a7a5552016-09-18 08:45:33 -04002298 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2299 RTM_NEWTFILTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300}
2301
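/* Dump all classifiers of one chain, resuming from the state saved in
 * cb->args[]. Returns false when the message ran out of space so the
 * caller can stop and report EMSGSIZE.
 */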
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002302static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2303 struct sk_buff *skb, struct netlink_callback *cb,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002304 long index_start, long *p_index)
2305{
2306 struct net *net = sock_net(skb->sk);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002307 struct tcf_block *block = chain->block;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002308 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002309 struct tcf_proto *tp, *tp_prev;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002310 struct tcf_dump_args arg;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002311
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002312 for (tp = __tcf_get_next_proto(chain, NULL);
2313 tp;
2314 tp_prev = tp,
2315 tp = __tcf_get_next_proto(chain, tp),
2316 tcf_proto_put(tp_prev, NULL),
2317 (*p_index)++) {
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002318 if (*p_index < index_start)
2319 continue;
2320 if (TC_H_MAJ(tcm->tcm_info) &&
2321 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2322 continue;
2323 if (TC_H_MIN(tcm->tcm_info) &&
2324 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2325 continue;
2326 if (*p_index > index_start)
2327 memset(&cb->args[1], 0,
2328 sizeof(cb->args) - sizeof(cb->args[0]));
2329 if (cb->args[1] == 0) {
YueHaibing53189182018-07-17 20:58:14 +08002330 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002331 NETLINK_CB(cb->skb).portid,
2332 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2333 RTM_NEWTFILTER) <= 0)
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002334 goto errout;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002335
2336 cb->args[1] = 1;
2337 }
2338 if (!tp->ops->walk)
2339 continue;
2340 arg.w.fn = tcf_node_dump;
2341 arg.skb = skb;
2342 arg.cb = cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002343 arg.block = block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002344 arg.q = q;
2345 arg.parent = parent;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002346 arg.w.stop = 0;
2347 arg.w.skip = cb->args[1] - 1;
2348 arg.w.count = 0;
Vlad Buslov01683a12018-07-09 13:29:11 +03002349 arg.w.cookie = cb->args[2];
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002350 tp->ops->walk(tp, &arg.w);
Vlad Buslov01683a12018-07-09 13:29:11 +03002351 cb->args[2] = arg.w.cookie;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002352 cb->args[1] = arg.w.count + 1;
2353 if (arg.w.stop)
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002354 goto errout;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002355 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002356 return true;
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002357
2358errout:
2359 tcf_proto_put(tp, NULL);
2360 return false;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002361}
2362
Eric Dumazetbd27a872009-11-05 20:57:26 -08002363/* called with RTNL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2365{
Vlad Buslovbbf73832019-02-11 10:55:36 +02002366 struct tcf_chain *chain, *chain_prev;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002367 struct net *net = sock_net(skb->sk);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002368 struct nlattr *tca[TCA_MAX + 1];
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002369 struct Qdisc *q = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02002370 struct tcf_block *block;
David S. Miller942b8162012-06-26 21:48:50 -07002371 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002372 long index_start;
2373 long index;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002374 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002375 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
Hong zhi guo573ce262013-03-27 06:47:04 +00002377 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 return skb->len;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002379
David Aherndac9c972018-10-07 20:16:24 -07002380 err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
2381 cb->extack);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002382 if (err)
2383 return err;
2384
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002385 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002386 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002387 if (!block)
WANG Cong143976c2017-08-24 16:51:29 -07002388 goto out;
Jiri Pirkod680b352018-01-18 16:14:49 +01002389 /* If we work with block index, q is NULL and parent value
2390 * will never be used in the following code. The check
2391 * in tcf_fill_node prevents it. However, the compiler does not
2392 * see that far, so set parent to zero to silence the warning
2393 * about parent being uninitialized.
2394 */
2395 parent = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002396 } else {
2397 const struct Qdisc_class_ops *cops;
2398 struct net_device *dev;
2399 unsigned long cl = 0;
2400
2401 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2402 if (!dev)
2403 return skb->len;
2404
2405 parent = tcm->tcm_parent;
2406 if (!parent) {
2407 q = dev->qdisc;
2408 parent = q->handle;
2409 } else {
2410 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2411 }
2412 if (!q)
2413 goto out;
2414 cops = q->ops->cl_ops;
2415 if (!cops)
2416 goto out;
2417 if (!cops->tcf_block)
2418 goto out;
2419 if (TC_H_MIN(tcm->tcm_parent)) {
2420 cl = cops->find(q, tcm->tcm_parent);
2421 if (cl == 0)
2422 goto out;
2423 }
2424 block = cops->tcf_block(q, cl, NULL);
2425 if (!block)
2426 goto out;
2427 if (tcf_block_shared(block))
2428 q = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002431 index_start = cb->args[0];
2432 index = 0;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002433
Vlad Buslovbbf73832019-02-11 10:55:36 +02002434 for (chain = __tcf_get_next_chain(block, NULL);
2435 chain;
2436 chain_prev = chain,
2437 chain = __tcf_get_next_chain(block, chain),
2438 tcf_chain_put(chain_prev)) {
Jiri Pirko5bc17012017-05-17 11:08:01 +02002439 if (tca[TCA_CHAIN] &&
2440 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2441 continue;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002442 if (!tcf_chain_dump(chain, q, parent, skb, cb,
Roman Kapl5ae437a2018-02-19 21:32:51 +01002443 index_start, &index)) {
Vlad Buslovbbf73832019-02-11 10:55:36 +02002444 tcf_chain_put(chain);
Roman Kapl5ae437a2018-02-19 21:32:51 +01002445 err = -EMSGSIZE;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002446 break;
Roman Kapl5ae437a2018-02-19 21:32:51 +01002447 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002448 }
2449
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002450 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2451 tcf_block_refcnt_put(block);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002452 cb->args[0] = index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454out:
Roman Kapl5ae437a2018-02-19 21:32:51 +01002455 /* If we made no progress, the error (EMSGSIZE) is real */
2456 if (skb->len == 0 && err)
2457 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 return skb->len;
2459}
2460
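/* Build a netlink message describing a chain. If a template is attached,
 * its kind and contents are dumped through the owning classifier's
 * ->tmplt_dump() callback.
 */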
Vlad Buslova5654822019-02-11 10:55:37 +02002461static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2462 void *tmplt_priv, u32 chain_index,
2463 struct net *net, struct sk_buff *skb,
2464 struct tcf_block *block,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002465 u32 portid, u32 seq, u16 flags, int event)
2466{
2467 unsigned char *b = skb_tail_pointer(skb);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002468 const struct tcf_proto_ops *ops;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002469 struct nlmsghdr *nlh;
2470 struct tcmsg *tcm;
Jiri Pirko9f407f12018-07-23 09:23:07 +02002471 void *priv;
2472
Vlad Buslova5654822019-02-11 10:55:37 +02002473 ops = tmplt_ops;
2474 priv = tmplt_priv;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002475
2476 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2477 if (!nlh)
2478 goto out_nlmsg_trim;
2479 tcm = nlmsg_data(nlh);
2480 tcm->tcm_family = AF_UNSPEC;
2481 tcm->tcm__pad1 = 0;
2482 tcm->tcm__pad2 = 0;
2483 tcm->tcm_handle = 0;
2484 if (block->q) {
2485 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2486 tcm->tcm_parent = block->q->handle;
2487 } else {
2488 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2489 tcm->tcm_block_index = block->index;
2490 }
2491
Vlad Buslova5654822019-02-11 10:55:37 +02002492 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002493 goto nla_put_failure;
2494
Jiri Pirko9f407f12018-07-23 09:23:07 +02002495 if (ops) {
2496 if (nla_put_string(skb, TCA_KIND, ops->kind))
2497 goto nla_put_failure;
2498 if (ops->tmplt_dump(skb, net, priv) < 0)
2499 goto nla_put_failure;
2500 }
2501
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002502 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2503 return skb->len;
2504
2505out_nlmsg_trim:
2506nla_put_failure:
2507 nlmsg_trim(skb, b);
2508 return -EMSGSIZE;
2509}
2510
2511static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2512 u32 seq, u16 flags, int event, bool unicast)
2513{
2514 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2515 struct tcf_block *block = chain->block;
2516 struct net *net = block->net;
2517 struct sk_buff *skb;
2518
2519 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2520 if (!skb)
2521 return -ENOBUFS;
2522
Vlad Buslova5654822019-02-11 10:55:37 +02002523 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2524 chain->index, net, skb, block, portid,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002525 seq, flags, event) <= 0) {
2526 kfree_skb(skb);
2527 return -EINVAL;
2528 }
2529
2530 if (unicast)
2531 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2532
2533 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2534}
2535
Vlad Buslova5654822019-02-11 10:55:37 +02002536static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2537 void *tmplt_priv, u32 chain_index,
2538 struct tcf_block *block, struct sk_buff *oskb,
2539 u32 seq, u16 flags, bool unicast)
2540{
2541 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2542 struct net *net = block->net;
2543 struct sk_buff *skb;
2544
2545 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2546 if (!skb)
2547 return -ENOBUFS;
2548
2549 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2550 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2551 kfree_skb(skb);
2552 return -EINVAL;
2553 }
2554
2555 if (unicast)
2556 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2557
2558 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2559}
2560
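/* Attach a chain template: look up the classifier named in TCA_KIND and let
 * it create the template from the remaining attributes. Classifiers without
 * template support are rejected with -EOPNOTSUPP.
 */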
Jiri Pirko9f407f12018-07-23 09:23:07 +02002561static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2562 struct nlattr **tca,
2563 struct netlink_ext_ack *extack)
2564{
2565 const struct tcf_proto_ops *ops;
2566 void *tmplt_priv;
2567
2568 /* If kind is not set, user did not specify template. */
2569 if (!tca[TCA_KIND])
2570 return 0;
2571
2572 ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
2573 if (IS_ERR(ops))
2574 return PTR_ERR(ops);
2575 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2576 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2577 return -EOPNOTSUPP;
2578 }
2579
2580 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2581 if (IS_ERR(tmplt_priv)) {
2582 module_put(ops->owner);
2583 return PTR_ERR(tmplt_priv);
2584 }
2585 chain->tmplt_ops = ops;
2586 chain->tmplt_priv = tmplt_priv;
2587 return 0;
2588}
2589
Vlad Buslova5654822019-02-11 10:55:37 +02002590static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2591 void *tmplt_priv)
Jiri Pirko9f407f12018-07-23 09:23:07 +02002592{
Jiri Pirko9f407f12018-07-23 09:23:07 +02002593 /* If template ops are not set, there is no work to do for us. */
Vlad Buslova5654822019-02-11 10:55:37 +02002594 if (!tmplt_ops)
Jiri Pirko9f407f12018-07-23 09:23:07 +02002595 return;
2596
Vlad Buslova5654822019-02-11 10:55:37 +02002597 tmplt_ops->tmplt_destroy(tmplt_priv);
2598 module_put(tmplt_ops->owner);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002599}
2600
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002601/* Add/delete/get a chain */
2602
2603static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2604 struct netlink_ext_ack *extack)
2605{
2606 struct net *net = sock_net(skb->sk);
2607 struct nlattr *tca[TCA_MAX + 1];
2608 struct tcmsg *t;
2609 u32 parent;
2610 u32 chain_index;
2611 struct Qdisc *q = NULL;
2612 struct tcf_chain *chain = NULL;
2613 struct tcf_block *block;
2614 unsigned long cl;
2615 int err;
2616
2617 if (n->nlmsg_type != RTM_GETCHAIN &&
2618 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2619 return -EPERM;
2620
2621replay:
Davide Carattie3314732018-10-10 22:00:58 +02002622 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002623 if (err < 0)
2624 return err;
2625
2626 t = nlmsg_data(n);
2627 parent = t->tcm_parent;
2628 cl = 0;
2629
2630 block = tcf_block_find(net, &q, &parent, &cl,
2631 t->tcm_ifindex, t->tcm_block_index, extack);
2632 if (IS_ERR(block))
2633 return PTR_ERR(block);
2634
2635 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2636 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2637 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002638 err = -EINVAL;
2639 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002640 }
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002641
2642 mutex_lock(&block->lock);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002643 chain = tcf_chain_lookup(block, chain_index);
2644 if (n->nlmsg_type == RTM_NEWCHAIN) {
2645 if (chain) {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002646 if (tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002647 /* The chain exists only because there is
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002648 * some action referencing it.
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002649 */
2650 tcf_chain_hold(chain);
2651 } else {
2652 NL_SET_ERR_MSG(extack, "Filter chain already exists");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002653 err = -EEXIST;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002654 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002655 }
2656 } else {
2657 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2658 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002659 err = -ENOENT;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002660 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002661 }
2662 chain = tcf_chain_create(block, chain_index);
2663 if (!chain) {
2664 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002665 err = -ENOMEM;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002666 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002667 }
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002668 }
2669 } else {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002670 if (!chain || tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002671 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002672 err = -EINVAL;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002673 goto errout_block_locked;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002674 }
2675 tcf_chain_hold(chain);
2676 }
2677
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002678 if (n->nlmsg_type == RTM_NEWCHAIN) {
2679 /* Modifying chain requires holding parent block lock. In case
2680 * the chain was successfully added, take a reference to the
2681 * chain. This ensures that an empty chain does not disappear at
2682 * the end of this function.
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002683 */
2684 tcf_chain_hold(chain);
2685 chain->explicitly_created = true;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002686 }
2687 mutex_unlock(&block->lock);
2688
2689 switch (n->nlmsg_type) {
2690 case RTM_NEWCHAIN:
2691 err = tc_chain_tmplt_add(chain, net, tca, extack);
2692 if (err) {
2693 tcf_chain_put_explicitly_created(chain);
2694 goto errout;
2695 }
2696
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002697 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2698 RTM_NEWCHAIN, false);
2699 break;
2700 case RTM_DELCHAIN:
Cong Wangf5b9bac2018-09-11 14:22:23 -07002701 tfilter_notify_chain(net, skb, block, q, parent, n,
2702 chain, RTM_DELTFILTER);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002703 /* Flush the chain first as the user requested chain removal. */
2704 tcf_chain_flush(chain);
2705 /* In case the chain was successfully deleted, put a reference
2706 * to the chain previously taken during addition.
2707 */
2708 tcf_chain_put_explicitly_created(chain);
2709 break;
2710 case RTM_GETCHAIN:
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002711 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2712 n->nlmsg_seq, n->nlmsg_type, true);
2713 if (err < 0)
2714 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2715 break;
2716 default:
2717 err = -EOPNOTSUPP;
2718 NL_SET_ERR_MSG(extack, "Unsupported message type");
2719 goto errout;
2720 }
2721
2722errout:
2723 tcf_chain_put(chain);
Vlad Buslove368fdb2018-09-24 19:22:53 +03002724errout_block:
2725 tcf_block_release(q, block);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002726 if (err == -EAGAIN)
2727 /* Replay the request. */
2728 goto replay;
2729 return err;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002730
2731errout_block_locked:
2732 mutex_unlock(&block->lock);
2733 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002734}
2735
2736/* called with RTNL */
2737static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2738{
Vlad Buslovbbf73832019-02-11 10:55:36 +02002739 struct tcf_chain *chain, *chain_prev;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002740 struct net *net = sock_net(skb->sk);
2741 struct nlattr *tca[TCA_MAX + 1];
2742 struct Qdisc *q = NULL;
2743 struct tcf_block *block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002744 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2745 long index_start;
2746 long index;
2747 u32 parent;
2748 int err;
2749
2750 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2751 return skb->len;
2752
Davide Carattie3314732018-10-10 22:00:58 +02002753 err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
David Aherndac9c972018-10-07 20:16:24 -07002754 cb->extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002755 if (err)
2756 return err;
2757
2758 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002759 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002760 if (!block)
2761 goto out;
2762 /* If we work with block index, q is NULL and parent value
2763 * will never be used in the following code. The check
2764 * in tcf_fill_node prevents it. However, the compiler does not
2765 * see that far, so set parent to zero to silence the warning
2766 * about parent being uninitialized.
2767 */
2768 parent = 0;
2769 } else {
2770 const struct Qdisc_class_ops *cops;
2771 struct net_device *dev;
2772 unsigned long cl = 0;
2773
2774 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2775 if (!dev)
2776 return skb->len;
2777
2778 parent = tcm->tcm_parent;
2779 if (!parent) {
2780 q = dev->qdisc;
2781 parent = q->handle;
2782 } else {
2783 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2784 }
2785 if (!q)
2786 goto out;
2787 cops = q->ops->cl_ops;
2788 if (!cops)
2789 goto out;
2790 if (!cops->tcf_block)
2791 goto out;
2792 if (TC_H_MIN(tcm->tcm_parent)) {
2793 cl = cops->find(q, tcm->tcm_parent);
2794 if (cl == 0)
2795 goto out;
2796 }
2797 block = cops->tcf_block(q, cl, NULL);
2798 if (!block)
2799 goto out;
2800 if (tcf_block_shared(block))
2801 q = NULL;
2802 }
2803
2804 index_start = cb->args[0];
2805 index = 0;
2806
Vlad Buslovbbf73832019-02-11 10:55:36 +02002807 for (chain = __tcf_get_next_chain(block, NULL);
2808 chain;
2809 chain_prev = chain,
2810 chain = __tcf_get_next_chain(block, chain),
2811 tcf_chain_put(chain_prev)) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002812 if ((tca[TCA_CHAIN] &&
2813 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2814 continue;
2815 if (index < index_start) {
2816 index++;
2817 continue;
2818 }
Vlad Buslova5654822019-02-11 10:55:37 +02002819 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2820 chain->index, net, skb, block,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002821 NETLINK_CB(cb->skb).portid,
2822 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2823 RTM_NEWCHAIN);
Vlad Buslovbbf73832019-02-11 10:55:36 +02002824 if (err <= 0) {
2825 tcf_chain_put(chain);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002826 break;
Vlad Buslovbbf73832019-02-11 10:55:36 +02002827 }
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002828 index++;
2829 }
2830
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002831 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2832 tcf_block_refcnt_put(block);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002833 cb->args[0] = index;
2834
2835out:
2836 /* If we did no progress, the error (EMSGSIZE) is real */
2837 if (skb->len == 0 && err)
2838 return err;
2839 return skb->len;
2840}
2841
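/* Release every action bound to @exts and free the action array itself.
 * A no-op when the kernel is built without CONFIG_NET_CLS_ACT.
 */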
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

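/* Parse the action attributes supplied with a classifier: either the legacy
 * single "police" action (TCA_OLD_COMPAT) or a full action list, and bind the
 * resulting actions to @exts. Without CONFIG_NET_CLS_ACT, any action
 * attribute is rejected with -EOPNOTSUPP.
 */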
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
		exts->net = net;
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

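/* Replace the actions in @dst with those from @src, then release whatever
 * @dst was holding before the swap.
 */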
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

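/* Dump the actions attached to @exts into a nested attribute, using the old
 * single-action format when the extensions were created in TCA_OLD_COMPAT
 * mode so older iproute2 binaries can still parse the result.
 */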
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

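/* Copy the statistics of the first action bound to @exts into the dump,
 * if any action is present.
 */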
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

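/* Invoke every offload callback registered on @block and return the number
 * of callbacks that succeeded. With @err_stop set, the first failure aborts
 * the walk and its error code is returned instead.
 */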
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

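/* Translate the tc actions bound to @exts into the driver-facing flow_action
 * representation. Each pedit key becomes its own entry, so the caller must
 * size @flow_action accordingly (see tcf_exts_num_actions() below). Any
 * unrecognized action makes the whole translation fail with -EOPNOTSUPP.
 */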
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	const struct tc_action *act;
	int i, j, k;

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			entry->dev = tcf_mirred_dev(act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			entry->dev = tcf_mirred_dev(act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				goto err_out;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			entry->tunnel = tcf_tunnel_info(act);
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
			entry->tunnel = tcf_tunnel_info(act);
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					goto err_out;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else {
			goto err_out;
		}

		if (!is_tcf_pedit(act))
			j++;
	}
	return 0;
err_out:
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tc_setup_flow_action);

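/* Count how many flow_action entries the actions in @exts will expand to:
 * one per action, except pedit which contributes one entry per key.
 */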
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

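/* Per-netns state: an IDR (plus its lock) used to track shared blocks by
 * index, set up and torn down with the network namespace.
 */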
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

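/* Module init: allocate the filter workqueue, register the per-netns state
 * and the indirect block setup table, then hook up the rtnetlink handlers
 * for filters and chains.
 */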
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	err = rhashtable_init(&indr_setup_block_ht,
			      &tc_indr_setup_block_ht_params);
	if (err)
		goto err_rhash_setup_block_ht;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_rhash_setup_block_ht:
	unregister_pernet_subsys(&tcf_net_ops);
err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);