// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

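/* Report whether the classifier identified by @kind can have its filters
 * updated without holding rtnl (ops flag TCF_PROTO_OPS_DOIT_UNLOCKED).
 */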
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

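/* Allocate a new tcf_proto of the given kind, taking a module reference on
 * its ops, setting the initial refcount to one and calling the ops' init().
 * Returns an ERR_PTR on failure.
 */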
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	if (tp->ops->walk) {
		tp->ops->walk(tp, &walker, rtnl_held);
		return !walker.nonempty;
	}
	return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

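/* Look up a chain by index under block->lock, optionally creating it.
 * Action-held references (by_act) are counted separately so that chains
 * referenced only by actions are not announced to user space.
 */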
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

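/* Drop a reference taken by __tcf_chain_get(). Dropping the last non-action
 * reference notifies user space of the chain removal; dropping the last
 * reference overall detaches the chain, frees its template and destroys it.
 */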
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

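/* Unlink every classifier from the chain under filter_chain_lock, then drop
 * the references outside the lock so the protos can be destroyed.
 */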
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct net_device *dev,
				  struct tcf_block *block,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv,
				  enum flow_block_command command)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
					  flow_indr_block_bind_cb_t *cb,
					  void *cb_priv,
					  enum flow_block_command command)
{
	struct tcf_block *block = tc_dev_ingress_block(dev);

	tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

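/* Bind the block to the device for offload. Devices without ndo_setup_tc, or
 * that return -EOPNOTSUPP for the bind, are counted in nooffloaddevcnt
 * instead; that fallback is refused when the block already has offloaded
 * filters.
 */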
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

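/* Look up a shared block by index and take a reference. Returns NULL if the
 * block does not exist or its refcount has already dropped to zero.
 */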
static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

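/* Release a block reference. On the last put the block is removed from the
 * per-netns idr (if shared), unbound from offload and either destroyed
 * directly when it has no chains or freed once its chains are flushed.
 */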
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

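/* Attach a qdisc to a block: reuse the requested shared block or create a new
 * one, record the owner, register the chain0 head-change callback and bind
 * the block to the device for offload.
 */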
Jiri Pirko48617382018-01-17 11:46:46 +01001265int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1266 struct tcf_block_ext_info *ei,
1267 struct netlink_ext_ack *extack)
1268{
1269 struct net *net = qdisc_net(q);
1270 struct tcf_block *block = NULL;
Jiri Pirko48617382018-01-17 11:46:46 +01001271 int err;
1272
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001273 if (ei->block_index)
Jiri Pirko48617382018-01-17 11:46:46 +01001274 /* block_index not 0 means the shared block is requested */
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001275 block = tcf_block_refcnt_get(net, ei->block_index);
Jiri Pirko48617382018-01-17 11:46:46 +01001276
1277 if (!block) {
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001278 block = tcf_block_create(net, q, ei->block_index, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001279 if (IS_ERR(block))
1280 return PTR_ERR(block);
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001281 if (tcf_block_shared(block)) {
1282 err = tcf_block_insert(block, net, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001283 if (err)
1284 goto err_block_insert;
1285 }
1286 }
1287
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001288 err = tcf_block_owner_add(block, q, ei->binder_type);
1289 if (err)
1290 goto err_block_owner_add;
1291
1292 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1293
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001294 err = tcf_chain0_head_change_cb_add(block, ei, extack);
Jiri Pirkoa9b19442018-01-17 11:46:45 +01001295 if (err)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001296 goto err_chain0_head_change_cb_add;
Jiri Pirkocaa72602018-01-17 11:46:50 +01001297
John Hurley60513bd2018-06-25 14:30:04 -07001298 err = tcf_block_offload_bind(block, q, ei, extack);
Jiri Pirkocaa72602018-01-17 11:46:50 +01001299 if (err)
1300 goto err_block_offload_bind;
1301
Jiri Pirko6529eab2017-05-17 11:07:55 +02001302 *p_block = block;
1303 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001304
Jiri Pirkocaa72602018-01-17 11:46:50 +01001305err_block_offload_bind:
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001306 tcf_chain0_head_change_cb_del(block, ei);
1307err_chain0_head_change_cb_add:
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001308 tcf_block_owner_del(block, q, ei->binder_type);
1309err_block_owner_add:
Jiri Pirko48617382018-01-17 11:46:46 +01001310err_block_insert:
Vlad Buslov12db03b2019-02-11 10:55:45 +02001311 tcf_block_refcnt_put(block, true);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001312 return err;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001313}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001314EXPORT_SYMBOL(tcf_block_get_ext);
1315
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001316static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1317{
1318 struct tcf_proto __rcu **p_filter_chain = priv;
1319
1320 rcu_assign_pointer(*p_filter_chain, tp_head);
1321}
1322
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001323int tcf_block_get(struct tcf_block **p_block,
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001324 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1325 struct netlink_ext_ack *extack)
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001326{
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001327 struct tcf_block_ext_info ei = {
1328 .chain_head_change = tcf_chain_head_change_dflt,
1329 .chain_head_change_priv = p_filter_chain,
1330 };
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001331
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001332 WARN_ON(!p_filter_chain);
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001333 return tcf_block_get_ext(p_block, q, &ei, extack);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001334}
Jiri Pirko6529eab2017-05-17 11:07:55 +02001335EXPORT_SYMBOL(tcf_block_get);
1336
Cong Wang7aa00452017-10-26 18:24:28 -07001337/* XXX: Standalone actions are not allowed to jump to any chain, and bound
Roman Kapla60b3f52017-11-24 12:27:58 +01001338 * actions should all be removed after flushing.
Cong Wang7aa00452017-10-26 18:24:28 -07001339 */
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001340void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
David S. Millere1ea2f92017-10-30 14:10:01 +09001341 struct tcf_block_ext_info *ei)
Cong Wang7aa00452017-10-26 18:24:28 -07001342{
David S. Millerc30abd52017-12-16 22:11:55 -05001343 if (!block)
1344 return;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001345 tcf_chain0_head_change_cb_del(block, ei);
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001346 tcf_block_owner_del(block, q, ei->binder_type);
Roman Kapla60b3f52017-11-24 12:27:58 +01001347
Vlad Buslov12db03b2019-02-11 10:55:45 +02001348 __tcf_block_put(block, q, ei, true);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001349}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001350EXPORT_SYMBOL(tcf_block_put_ext);
1351
1352void tcf_block_put(struct tcf_block *block)
1353{
1354 struct tcf_block_ext_info ei = {0, };
1355
Jiri Pirko4853f122017-12-21 13:13:59 +01001356 if (!block)
1357 return;
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001358 tcf_block_put_ext(block, block->q, &ei);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001359}
David S. Millere1ea2f92017-10-30 14:10:01 +09001360
Jiri Pirko6529eab2017-05-17 11:07:55 +02001361EXPORT_SYMBOL(tcf_block_put);
Jiri Pirkocf1facd2017-02-09 14:38:56 +01001362
John Hurley32636742018-06-25 14:30:10 -07001363static int
Pablo Neira Ayusoa7323312019-07-19 18:20:15 +02001364tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
John Hurley32636742018-06-25 14:30:10 -07001365 void *cb_priv, bool add, bool offload_in_use,
1366 struct netlink_ext_ack *extack)
1367{
Vlad Buslovbbf73832019-02-11 10:55:36 +02001368 struct tcf_chain *chain, *chain_prev;
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001369 struct tcf_proto *tp, *tp_prev;
John Hurley32636742018-06-25 14:30:10 -07001370 int err;
1371
Vlad Buslov4f8116c2019-08-26 16:44:57 +03001372 lockdep_assert_held(&block->cb_lock);
1373
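 /* Walk every chain and every classifier on the block, replaying (or
  * removing) their filters through the given callback. References taken on
  * the previously visited chain and proto are dropped as the loop advances.
  */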
Vlad Buslovbbf73832019-02-11 10:55:36 +02001374 for (chain = __tcf_get_next_chain(block, NULL);
1375 chain;
1376 chain_prev = chain,
1377 chain = __tcf_get_next_chain(block, chain),
1378 tcf_chain_put(chain_prev)) {
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001379 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1380 tp_prev = tp,
1381 tp = __tcf_get_next_proto(chain, tp),
Vlad Buslov12db03b2019-02-11 10:55:45 +02001382 tcf_proto_put(tp_prev, true, NULL)) {
John Hurley32636742018-06-25 14:30:10 -07001383 if (tp->ops->reoffload) {
1384 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1385 extack);
1386 if (err && add)
1387 goto err_playback_remove;
1388 } else if (add && offload_in_use) {
1389 err = -EOPNOTSUPP;
1390 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1391 goto err_playback_remove;
1392 }
1393 }
1394 }
1395
1396 return 0;
1397
1398err_playback_remove:
Vlad Buslov12db03b2019-02-11 10:55:45 +02001399 tcf_proto_put(tp, true, NULL);
Vlad Buslovbbf73832019-02-11 10:55:36 +02001400 tcf_chain_put(chain);
John Hurley32636742018-06-25 14:30:10 -07001401 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1402 extack);
1403 return err;
1404}
1405
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001406static int tcf_block_bind(struct tcf_block *block,
1407 struct flow_block_offload *bo)
1408{
1409 struct flow_block_cb *block_cb, *next;
1410 int err, i = 0;
1411
Vlad Buslov4f8116c2019-08-26 16:44:57 +03001412 lockdep_assert_held(&block->cb_lock);
1413
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001414 list_for_each_entry(block_cb, &bo->cb_list, list) {
1415 err = tcf_block_playback_offloads(block, block_cb->cb,
1416 block_cb->cb_priv, true,
1417 tcf_block_offload_in_use(block),
1418 bo->extack);
1419 if (err)
1420 goto err_unroll;
Vlad Buslovc9f14472019-08-26 16:45:01 +03001421 if (!bo->unlocked_driver_cb)
1422 block->lockeddevcnt++;
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001423
1424 i++;
1425 }
Pablo Neira Ayuso14bfb132019-07-19 18:20:16 +02001426 list_splice(&bo->cb_list, &block->flow_block.cb_list);
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001427
1428 return 0;
1429
1430err_unroll:
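 /* Undo the playback only for the callbacks that were successfully bound
  * above (counted in i); every entry on the list is still freed.
  */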
1431 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1432 if (i-- > 0) {
1433 list_del(&block_cb->list);
1434 tcf_block_playback_offloads(block, block_cb->cb,
1435 block_cb->cb_priv, false,
1436 tcf_block_offload_in_use(block),
1437 NULL);
Vlad Buslovc9f14472019-08-26 16:45:01 +03001438 if (!bo->unlocked_driver_cb)
1439 block->lockeddevcnt--;
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001440 }
1441 flow_block_cb_free(block_cb);
1442 }
1443
1444 return err;
1445}
1446
1447static void tcf_block_unbind(struct tcf_block *block,
1448 struct flow_block_offload *bo)
1449{
1450 struct flow_block_cb *block_cb, *next;
1451
Vlad Buslov4f8116c2019-08-26 16:44:57 +03001452 lockdep_assert_held(&block->cb_lock);
1453
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001454 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1455 tcf_block_playback_offloads(block, block_cb->cb,
1456 block_cb->cb_priv, false,
1457 tcf_block_offload_in_use(block),
1458 NULL);
1459 list_del(&block_cb->list);
1460 flow_block_cb_free(block_cb);
Vlad Buslovc9f14472019-08-26 16:45:01 +03001461 if (!bo->unlocked_driver_cb)
1462 block->lockeddevcnt--;
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001463 }
1464}
1465
1466static int tcf_block_setup(struct tcf_block *block,
1467 struct flow_block_offload *bo)
1468{
1469 int err;
1470
1471 switch (bo->command) {
1472 case FLOW_BLOCK_BIND:
1473 err = tcf_block_bind(block, bo);
1474 break;
1475 case FLOW_BLOCK_UNBIND:
1476 err = 0;
1477 tcf_block_unbind(block, bo);
1478 break;
1479 default:
1480 WARN_ON_ONCE(1);
1481 err = -EOPNOTSUPP;
1482 }
1483
1484 return err;
1485}
1486
Jiri Pirko87d83092017-05-17 11:07:54 +02001487/* Main classifier routine: scans classifier chain attached
1488 * to this qdisc, (optionally) tests for protocol and asks
1489 * specific classifiers.
1490 */
1491int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1492 struct tcf_result *res, bool compat_mode)
1493{
Jiri Pirko87d83092017-05-17 11:07:54 +02001494#ifdef CONFIG_NET_CLS_ACT
1495 const int max_reclassify_loop = 4;
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001496 const struct tcf_proto *orig_tp = tp;
1497 const struct tcf_proto *first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001498 int limit = 0;
1499
1500reclassify:
1501#endif
1502 for (; tp; tp = rcu_dereference_bh(tp->next)) {
Cong Wangcd0c4e72019-01-11 18:55:42 -08001503 __be16 protocol = tc_skb_protocol(skb);
Jiri Pirko87d83092017-05-17 11:07:54 +02001504 int err;
1505
1506 if (tp->protocol != protocol &&
1507 tp->protocol != htons(ETH_P_ALL))
1508 continue;
1509
1510 err = tp->classify(skb, tp, res);
1511#ifdef CONFIG_NET_CLS_ACT
Jiri Pirkodb505142017-05-17 11:08:03 +02001512 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001513 first_tp = orig_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001514 goto reset;
Jiri Pirkodb505142017-05-17 11:08:03 +02001515 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001516 first_tp = res->goto_tp;
Jiri Pirkodb505142017-05-17 11:08:03 +02001517 goto reset;
1518 }
Jiri Pirko87d83092017-05-17 11:07:54 +02001519#endif
1520 if (err >= 0)
1521 return err;
1522 }
1523
1524 return TC_ACT_UNSPEC; /* signal: continue lookup */
1525#ifdef CONFIG_NET_CLS_ACT
1526reset:
1527 if (unlikely(limit++ >= max_reclassify_loop)) {
Jiri Pirko9d3aaff2018-01-17 11:46:47 +01001528 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1529 tp->chain->block->index,
1530 tp->prio & 0xffff,
Jiri Pirko87d83092017-05-17 11:07:54 +02001531 ntohs(tp->protocol));
1532 return TC_ACT_SHOT;
1533 }
1534
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001535 tp = first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001536 goto reclassify;
1537#endif
1538}
1539EXPORT_SYMBOL(tcf_classify);
1540
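/* Records a position in a chain's filter list: pprev is the link to update
 * and next the classifier that follows, so a proto can be inserted or
 * removed without rescanning the chain.
 */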
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001541struct tcf_chain_info {
1542 struct tcf_proto __rcu **pprev;
1543 struct tcf_proto __rcu *next;
1544};
1545
Vlad Busloved76f5e2019-02-11 10:55:38 +02001546static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1547 struct tcf_chain_info *chain_info)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001548{
Vlad Busloved76f5e2019-02-11 10:55:38 +02001549 return tcf_chain_dereference(*chain_info->pprev, chain);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001550}
1551
Vlad Buslov726d06122019-02-11 10:55:42 +02001552static int tcf_chain_tp_insert(struct tcf_chain *chain,
1553 struct tcf_chain_info *chain_info,
1554 struct tcf_proto *tp)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001555{
Vlad Buslov726d06122019-02-11 10:55:42 +02001556 if (chain->flushing)
1557 return -EAGAIN;
1558
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001559 if (*chain_info->pprev == chain->filter_chain)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001560 tcf_chain0_head_change(chain, tp);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001561 tcf_proto_get(tp);
Vlad Busloved76f5e2019-02-11 10:55:38 +02001562 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001563 rcu_assign_pointer(*chain_info->pprev, tp);
Vlad Buslov726d06122019-02-11 10:55:42 +02001564
1565 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001566}
1567
1568static void tcf_chain_tp_remove(struct tcf_chain *chain,
1569 struct tcf_chain_info *chain_info,
1570 struct tcf_proto *tp)
1571{
Vlad Busloved76f5e2019-02-11 10:55:38 +02001572 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001573
Vlad Buslov8b646782019-02-11 10:55:41 +02001574 tcf_proto_mark_delete(tp);
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001575 if (tp == chain->filter_chain)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001576 tcf_chain0_head_change(chain, next);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001577 RCU_INIT_POINTER(*chain_info->pprev, next);
1578}
1579
1580static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1581 struct tcf_chain_info *chain_info,
1582 u32 protocol, u32 prio,
Vlad Buslov8b646782019-02-11 10:55:41 +02001583 bool prio_allocate);
1584
1585/* Try to insert new proto.
1586 * If proto with specified priority already exists, free new proto
1587 * and return existing one.
1588 */
1589
1590static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1591 struct tcf_proto *tp_new,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001592 u32 protocol, u32 prio,
1593 bool rtnl_held)
Vlad Buslov8b646782019-02-11 10:55:41 +02001594{
1595 struct tcf_chain_info chain_info;
1596 struct tcf_proto *tp;
Vlad Buslov726d06122019-02-11 10:55:42 +02001597 int err = 0;
Vlad Buslov8b646782019-02-11 10:55:41 +02001598
1599 mutex_lock(&chain->filter_chain_lock);
1600
1601 tp = tcf_chain_tp_find(chain, &chain_info,
1602 protocol, prio, false);
1603 if (!tp)
Vlad Buslov726d06122019-02-11 10:55:42 +02001604 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
Vlad Buslov8b646782019-02-11 10:55:41 +02001605 mutex_unlock(&chain->filter_chain_lock);
1606
1607 if (tp) {
Vlad Buslov12db03b2019-02-11 10:55:45 +02001608 tcf_proto_destroy(tp_new, rtnl_held, NULL);
Vlad Buslov8b646782019-02-11 10:55:41 +02001609 tp_new = tp;
Vlad Buslov726d06122019-02-11 10:55:42 +02001610 } else if (err) {
Vlad Buslov12db03b2019-02-11 10:55:45 +02001611 tcf_proto_destroy(tp_new, rtnl_held, NULL);
Vlad Buslov726d06122019-02-11 10:55:42 +02001612 tp_new = ERR_PTR(err);
Vlad Buslov8b646782019-02-11 10:55:41 +02001613 }
1614
1615 return tp_new;
1616}
1617
1618static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001619 struct tcf_proto *tp, bool rtnl_held,
Vlad Buslov8b646782019-02-11 10:55:41 +02001620 struct netlink_ext_ack *extack)
1621{
1622 struct tcf_chain_info chain_info;
1623 struct tcf_proto *tp_iter;
1624 struct tcf_proto **pprev;
1625 struct tcf_proto *next;
1626
1627 mutex_lock(&chain->filter_chain_lock);
1628
1629 /* Atomically find and remove tp from chain. */
1630 for (pprev = &chain->filter_chain;
1631 (tp_iter = tcf_chain_dereference(*pprev, chain));
1632 pprev = &tp_iter->next) {
1633 if (tp_iter == tp) {
1634 chain_info.pprev = pprev;
1635 chain_info.next = tp_iter->next;
1636 WARN_ON(tp_iter->deleting);
1637 break;
1638 }
1639 }
1640 /* Verify that tp still exists and no new filters were inserted
1641 * concurrently.
1642 * Mark tp for deletion if it is empty.
1643 */
Vlad Buslov12db03b2019-02-11 10:55:45 +02001644 if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
Vlad Buslov8b646782019-02-11 10:55:41 +02001645 mutex_unlock(&chain->filter_chain_lock);
1646 return;
1647 }
1648
1649 next = tcf_chain_dereference(chain_info.next, chain);
1650 if (tp == chain->filter_chain)
1651 tcf_chain0_head_change(chain, next);
1652 RCU_INIT_POINTER(*chain_info.pprev, next);
1653 mutex_unlock(&chain->filter_chain_lock);
1654
Vlad Buslov12db03b2019-02-11 10:55:45 +02001655 tcf_proto_put(tp, rtnl_held, extack);
Vlad Buslov8b646782019-02-11 10:55:41 +02001656}
1657
1658static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1659 struct tcf_chain_info *chain_info,
1660 u32 protocol, u32 prio,
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001661 bool prio_allocate)
1662{
1663 struct tcf_proto **pprev;
1664 struct tcf_proto *tp;
1665
1666 /* Check the chain for existence of proto-tcf with this priority */
1667 for (pprev = &chain->filter_chain;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001668 (tp = tcf_chain_dereference(*pprev, chain));
1669 pprev = &tp->next) {
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001670 if (tp->prio >= prio) {
1671 if (tp->prio == prio) {
1672 if (prio_allocate ||
1673 (tp->protocol != protocol && protocol))
1674 return ERR_PTR(-EINVAL);
1675 } else {
1676 tp = NULL;
1677 }
1678 break;
1679 }
1680 }
1681 chain_info->pprev = pprev;
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001682 if (tp) {
1683 chain_info->next = tp->next;
1684 tcf_proto_get(tp);
1685 } else {
1686 chain_info->next = NULL;
1687 }
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001688 return tp;
1689}
1690
WANG Cong71203712017-08-07 15:26:50 -07001691static int tcf_fill_node(struct net *net, struct sk_buff *skb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001692 struct tcf_proto *tp, struct tcf_block *block,
1693 struct Qdisc *q, u32 parent, void *fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001694 u32 portid, u32 seq, u16 flags, int event,
1695 bool rtnl_held)
WANG Cong71203712017-08-07 15:26:50 -07001696{
1697 struct tcmsg *tcm;
1698 struct nlmsghdr *nlh;
1699 unsigned char *b = skb_tail_pointer(skb);
1700
1701 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1702 if (!nlh)
1703 goto out_nlmsg_trim;
1704 tcm = nlmsg_data(nlh);
1705 tcm->tcm_family = AF_UNSPEC;
1706 tcm->tcm__pad1 = 0;
1707 tcm->tcm__pad2 = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001708 if (q) {
1709 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1710 tcm->tcm_parent = parent;
1711 } else {
1712 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1713 tcm->tcm_block_index = block->index;
1714 }
WANG Cong71203712017-08-07 15:26:50 -07001715 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1716 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1717 goto nla_put_failure;
1718 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1719 goto nla_put_failure;
1720 if (!fh) {
1721 tcm->tcm_handle = 0;
1722 } else {
Vlad Buslov12db03b2019-02-11 10:55:45 +02001723 if (tp->ops->dump &&
1724 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
WANG Cong71203712017-08-07 15:26:50 -07001725 goto nla_put_failure;
1726 }
1727 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1728 return skb->len;
1729
1730out_nlmsg_trim:
1731nla_put_failure:
1732 nlmsg_trim(skb, b);
1733 return -1;
1734}
1735
1736static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1737 struct nlmsghdr *n, struct tcf_proto *tp,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001738 struct tcf_block *block, struct Qdisc *q,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001739 u32 parent, void *fh, int event, bool unicast,
1740 bool rtnl_held)
WANG Cong71203712017-08-07 15:26:50 -07001741{
1742 struct sk_buff *skb;
1743 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001744 int err = 0;
WANG Cong71203712017-08-07 15:26:50 -07001745
1746 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1747 if (!skb)
1748 return -ENOBUFS;
1749
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001750 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001751 n->nlmsg_seq, n->nlmsg_flags, event,
1752 rtnl_held) <= 0) {
WANG Cong71203712017-08-07 15:26:50 -07001753 kfree_skb(skb);
1754 return -EINVAL;
1755 }
1756
1757 if (unicast)
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001758 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1759 else
1760 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1761 n->nlmsg_flags & NLM_F_ECHO);
WANG Cong71203712017-08-07 15:26:50 -07001762
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001763 if (err > 0)
1764 err = 0;
1765 return err;
WANG Cong71203712017-08-07 15:26:50 -07001766}
1767
1768static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1769 struct nlmsghdr *n, struct tcf_proto *tp,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001770 struct tcf_block *block, struct Qdisc *q,
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001771 u32 parent, void *fh, bool unicast, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001772 bool rtnl_held, struct netlink_ext_ack *extack)
WANG Cong71203712017-08-07 15:26:50 -07001773{
1774 struct sk_buff *skb;
1775 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1776 int err;
1777
1778 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1779 if (!skb)
1780 return -ENOBUFS;
1781
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001782 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001783 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1784 rtnl_held) <= 0) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001785 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
WANG Cong71203712017-08-07 15:26:50 -07001786 kfree_skb(skb);
1787 return -EINVAL;
1788 }
1789
Vlad Buslov12db03b2019-02-11 10:55:45 +02001790 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
WANG Cong71203712017-08-07 15:26:50 -07001791 if (err) {
1792 kfree_skb(skb);
1793 return err;
1794 }
1795
1796 if (unicast)
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001797 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1798 else
1799 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1800 n->nlmsg_flags & NLM_F_ECHO);
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001801 if (err < 0)
1802 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001803
1804 if (err > 0)
1805 err = 0;
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001806 return err;
WANG Cong71203712017-08-07 15:26:50 -07001807}
1808
1809static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001810 struct tcf_block *block, struct Qdisc *q,
1811 u32 parent, struct nlmsghdr *n,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001812 struct tcf_chain *chain, int event,
1813 bool rtnl_held)
WANG Cong71203712017-08-07 15:26:50 -07001814{
1815 struct tcf_proto *tp;
1816
Vlad Buslov12db03b2019-02-11 10:55:45 +02001817 for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1818 tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001819 tfilter_notify(net, oskb, n, tp, block,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001820 q, parent, NULL, event, false, rtnl_held);
WANG Cong71203712017-08-07 15:26:50 -07001821}
1822
Vlad Buslov7d5509f2019-02-11 10:55:44 +02001823static void tfilter_put(struct tcf_proto *tp, void *fh)
1824{
1825 if (tp->ops->put && fh)
1826 tp->ops->put(tp, fh);
1827}
1828
Vlad Buslovc431f892018-05-31 09:52:53 +03001829static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
David Ahernc21ef3e2017-04-16 09:48:24 -07001830 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001832 struct net *net = sock_net(skb->sk);
Patrick McHardyadd93b62008-01-22 22:11:33 -08001833 struct nlattr *tca[TCA_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 struct tcmsg *t;
1835 u32 protocol;
1836 u32 prio;
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001837 bool prio_allocate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001839 u32 chain_index;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001840 struct Qdisc *q = NULL;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001841 struct tcf_chain_info chain_info;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001842 struct tcf_chain *chain = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001843 struct tcf_block *block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 struct tcf_proto *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 unsigned long cl;
WANG Cong8113c092017-08-04 21:31:43 -07001846 void *fh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 int err;
Daniel Borkmann628185c2016-12-21 18:04:11 +01001848 int tp_created;
Vlad Buslov470502d2019-02-11 10:55:48 +02001849 bool rtnl_held = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
Vlad Buslovc431f892018-05-31 09:52:53 +03001851 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
Eric W. Biedermandfc47ef2012-11-16 03:03:00 +00001852 return -EPERM;
Hong zhi guode179c82013-03-25 17:36:33 +00001853
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854replay:
Daniel Borkmann628185c2016-12-21 18:04:11 +01001855 tp_created = 0;
1856
Johannes Berg8cb08172019-04-26 14:07:28 +02001857 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1858 rtm_tca_policy, extack);
Hong zhi guode179c82013-03-25 17:36:33 +00001859 if (err < 0)
1860 return err;
1861
David S. Miller942b8162012-06-26 21:48:50 -07001862 t = nlmsg_data(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 protocol = TC_H_MIN(t->tcm_info);
1864 prio = TC_H_MAJ(t->tcm_info);
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001865 prio_allocate = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 parent = t->tcm_parent;
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001867 tp = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 cl = 0;
Vlad Buslov470502d2019-02-11 10:55:48 +02001869 block = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
1871 if (prio == 0) {
Vlad Buslovc431f892018-05-31 09:52:53 +03001872 /* If no priority is provided by the user,
1873 * we allocate one.
1874 */
1875 if (n->nlmsg_flags & NLM_F_CREATE) {
1876 prio = TC_H_MAKE(0x80000000U, 0U);
1877 prio_allocate = true;
1878 } else {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001879 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 return -ENOENT;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001881 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 }
1883
1884 /* Find head of filter chain. */
1885
Vlad Buslov470502d2019-02-11 10:55:48 +02001886 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1887 if (err)
1888 return err;
1889
1890 /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
1891 * block is shared (no qdisc found), qdisc is not unlocked, classifier
1892 * type is not specified, classifier is not unlocked.
1893 */
1894 if (rtnl_held ||
1895 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
1896 !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
1897 rtnl_held = true;
1898 rtnl_lock();
1899 }
1900
1901 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
1902 if (err)
1903 goto errout;
1904
1905 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
1906 extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03001907 if (IS_ERR(block)) {
1908 err = PTR_ERR(block);
1909 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001910 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02001911
1912 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1913 if (chain_index > TC_ACT_EXT_VAL_MASK) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001914 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Jiri Pirko5bc17012017-05-17 11:08:01 +02001915 err = -EINVAL;
1916 goto errout;
1917 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001918 chain = tcf_chain_get(block, chain_index, true);
Jiri Pirko5bc17012017-05-17 11:08:01 +02001919 if (!chain) {
Jiri Pirkod5ed72a2018-08-27 20:58:43 +02001920 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
Vlad Buslovc431f892018-05-31 09:52:53 +03001921 err = -ENOMEM;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001922 goto errout;
1923 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
Vlad Busloved76f5e2019-02-11 10:55:38 +02001925 mutex_lock(&chain->filter_chain_lock);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001926 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1927 prio, prio_allocate);
1928 if (IS_ERR(tp)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001929 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001930 err = PTR_ERR(tp);
Vlad Busloved76f5e2019-02-11 10:55:38 +02001931 goto errout_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 }
1933
1934 if (tp == NULL) {
Vlad Buslov8b646782019-02-11 10:55:41 +02001935 struct tcf_proto *tp_new = NULL;
1936
Vlad Buslov726d06122019-02-11 10:55:42 +02001937 if (chain->flushing) {
1938 err = -EAGAIN;
1939 goto errout_locked;
1940 }
1941
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 /* Proto-tcf does not exist, create new one */
1943
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001944 if (tca[TCA_KIND] == NULL || !protocol) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001945 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001946 err = -EINVAL;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001947 goto errout_locked;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001948 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
Vlad Buslovc431f892018-05-31 09:52:53 +03001950 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001951 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001952 err = -ENOENT;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001953 goto errout_locked;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001954 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001956 if (prio_allocate)
Vlad Busloved76f5e2019-02-11 10:55:38 +02001957 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
1958 &chain_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
Vlad Busloved76f5e2019-02-11 10:55:38 +02001960 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslov8b646782019-02-11 10:55:41 +02001961 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
Vlad Buslov12db03b2019-02-11 10:55:45 +02001962 protocol, prio, chain, rtnl_held,
1963 extack);
Vlad Buslov8b646782019-02-11 10:55:41 +02001964 if (IS_ERR(tp_new)) {
1965 err = PTR_ERR(tp_new);
Vlad Buslov726d06122019-02-11 10:55:42 +02001966 goto errout_tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02001968
Minoru Usui12186be2009-06-02 02:17:34 -07001969 tp_created = 1;
Vlad Buslov12db03b2019-02-11 10:55:45 +02001970 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
1971 rtnl_held);
Vlad Buslov726d06122019-02-11 10:55:42 +02001972 if (IS_ERR(tp)) {
1973 err = PTR_ERR(tp);
1974 goto errout_tp;
1975 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02001976 } else {
1977 mutex_unlock(&chain->filter_chain_lock);
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001978 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Vlad Buslov8b646782019-02-11 10:55:41 +02001980 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1981 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1982 err = -EINVAL;
1983 goto errout;
1984 }
1985
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 fh = tp->ops->get(tp, t->tcm_handle);
1987
WANG Cong8113c092017-08-04 21:31:43 -07001988 if (!fh) {
Vlad Buslovc431f892018-05-31 09:52:53 +03001989 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001990 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001991 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001993 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001994 } else if (n->nlmsg_flags & NLM_F_EXCL) {
Vlad Buslov7d5509f2019-02-11 10:55:44 +02001995 tfilter_put(tp, fh);
Vlad Buslovc431f892018-05-31 09:52:53 +03001996 NL_SET_ERR_MSG(extack, "Filter already exists");
1997 err = -EEXIST;
1998 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 }
2000
Jiri Pirko9f407f12018-07-23 09:23:07 +02002001 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2002 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2003 err = -EINVAL;
2004 goto errout;
2005 }
2006
Cong Wang2f7ef2f2014-04-25 13:54:06 -07002007 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
Alexander Aring7306db32018-01-18 11:20:51 -05002008 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002009 rtnl_held, extack);
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002010 if (err == 0) {
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002011 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002012 RTM_NEWTFILTER, false, rtnl_held);
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002013 tfilter_put(tp, fh);
Vlad Buslov503d81d2019-07-21 17:44:12 +03002014 /* q pointer is NULL for shared blocks */
2015 if (q)
2016 q->flags &= ~TCQ_F_CAN_BYPASS;
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018
2019errout:
Vlad Buslov8b646782019-02-11 10:55:41 +02002020 if (err && tp_created)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002021 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
Vlad Buslov726d06122019-02-11 10:55:42 +02002022errout_tp:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002023 if (chain) {
2024 if (tp && !IS_ERR(tp))
Vlad Buslov12db03b2019-02-11 10:55:45 +02002025 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002026 if (!tp_created)
2027 tcf_chain_put(chain);
2028 }
Vlad Buslov12db03b2019-02-11 10:55:45 +02002029 tcf_block_release(q, block, rtnl_held);
Vlad Buslov470502d2019-02-11 10:55:48 +02002030
2031 if (rtnl_held)
2032 rtnl_unlock();
2033
2034 if (err == -EAGAIN) {
2035 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2036 * of target chain.
2037 */
2038 rtnl_held = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 /* Replay the request. */
2040 goto replay;
Vlad Buslov470502d2019-02-11 10:55:48 +02002041 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 return err;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002043
2044errout_locked:
2045 mutex_unlock(&chain->filter_chain_lock);
2046 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047}
2048
Vlad Buslovc431f892018-05-31 09:52:53 +03002049static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2050 struct netlink_ext_ack *extack)
2051{
2052 struct net *net = sock_net(skb->sk);
2053 struct nlattr *tca[TCA_MAX + 1];
2054 struct tcmsg *t;
2055 u32 protocol;
2056 u32 prio;
2057 u32 parent;
2058 u32 chain_index;
2059 struct Qdisc *q = NULL;
2060 struct tcf_chain_info chain_info;
2061 struct tcf_chain *chain = NULL;
Vlad Buslov470502d2019-02-11 10:55:48 +02002062 struct tcf_block *block = NULL;
Vlad Buslovc431f892018-05-31 09:52:53 +03002063 struct tcf_proto *tp = NULL;
2064 unsigned long cl = 0;
2065 void *fh = NULL;
2066 int err;
Vlad Buslov470502d2019-02-11 10:55:48 +02002067 bool rtnl_held = false;
Vlad Buslovc431f892018-05-31 09:52:53 +03002068
2069 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2070 return -EPERM;
2071
Johannes Berg8cb08172019-04-26 14:07:28 +02002072 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2073 rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002074 if (err < 0)
2075 return err;
2076
2077 t = nlmsg_data(n);
2078 protocol = TC_H_MIN(t->tcm_info);
2079 prio = TC_H_MAJ(t->tcm_info);
2080 parent = t->tcm_parent;
2081
2082 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2083 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2084 return -ENOENT;
2085 }
2086
2087 /* Find head of filter chain. */
2088
Vlad Buslov470502d2019-02-11 10:55:48 +02002089 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2090 if (err)
2091 return err;
2092
2093 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
2094 * found), qdisc is not unlocked, classifier type is not specified,
2095 * classifier is not unlocked.
2096 */
2097 if (!prio ||
2098 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2099 !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2100 rtnl_held = true;
2101 rtnl_lock();
2102 }
2103
2104 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2105 if (err)
2106 goto errout;
2107
2108 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2109 extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002110 if (IS_ERR(block)) {
2111 err = PTR_ERR(block);
2112 goto errout;
2113 }
2114
2115 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2116 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2117 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2118 err = -EINVAL;
2119 goto errout;
2120 }
2121 chain = tcf_chain_get(block, chain_index, false);
2122 if (!chain) {
Jiri Pirko5ca8a252018-08-03 11:08:47 +02002123 /* User requested flush on non-existent chain. Nothing to do,
2124 * so just return success.
2125 */
2126 if (prio == 0) {
2127 err = 0;
2128 goto errout;
2129 }
Vlad Buslovc431f892018-05-31 09:52:53 +03002130 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Jiri Pirkob7b42472018-08-27 20:58:44 +02002131 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002132 goto errout;
2133 }
2134
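 /* A priority of zero requests a flush: notify about and delete every
  * filter on the chain rather than a single one.
  */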
2135 if (prio == 0) {
2136 tfilter_notify_chain(net, skb, block, q, parent, n,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002137 chain, RTM_DELTFILTER, rtnl_held);
2138 tcf_chain_flush(chain, rtnl_held);
Vlad Buslovc431f892018-05-31 09:52:53 +03002139 err = 0;
2140 goto errout;
2141 }
2142
Vlad Busloved76f5e2019-02-11 10:55:38 +02002143 mutex_lock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002144 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2145 prio, false);
2146 if (!tp || IS_ERR(tp)) {
2147 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03002148 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002149 goto errout_locked;
Vlad Buslovc431f892018-05-31 09:52:53 +03002150 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2151 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2152 err = -EINVAL;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002153 goto errout_locked;
2154 } else if (t->tcm_handle == 0) {
2155 tcf_chain_tp_remove(chain, &chain_info, tp);
2156 mutex_unlock(&chain->filter_chain_lock);
2157
Vlad Buslov12db03b2019-02-11 10:55:45 +02002158 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002159 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002160 RTM_DELTFILTER, false, rtnl_held);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002161 err = 0;
Vlad Buslovc431f892018-05-31 09:52:53 +03002162 goto errout;
2163 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02002164 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002165
2166 fh = tp->ops->get(tp, t->tcm_handle);
2167
2168 if (!fh) {
Vlad Busloved76f5e2019-02-11 10:55:38 +02002169 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2170 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002171 } else {
2172 bool last;
2173
2174 err = tfilter_del_notify(net, skb, n, tp, block,
2175 q, parent, fh, false, &last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002176 rtnl_held, extack);
2177
Vlad Buslovc431f892018-05-31 09:52:53 +03002178 if (err)
2179 goto errout;
Vlad Buslov8b646782019-02-11 10:55:41 +02002180 if (last)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002181 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002182 }
2183
2184errout:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002185 if (chain) {
2186 if (tp && !IS_ERR(tp))
Vlad Buslov12db03b2019-02-11 10:55:45 +02002187 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Buslovc431f892018-05-31 09:52:53 +03002188 tcf_chain_put(chain);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002189 }
Vlad Buslov12db03b2019-02-11 10:55:45 +02002190 tcf_block_release(q, block, rtnl_held);
Vlad Buslov470502d2019-02-11 10:55:48 +02002191
2192 if (rtnl_held)
2193 rtnl_unlock();
2194
Vlad Buslovc431f892018-05-31 09:52:53 +03002195 return err;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002196
2197errout_locked:
2198 mutex_unlock(&chain->filter_chain_lock);
2199 goto errout;
Vlad Buslovc431f892018-05-31 09:52:53 +03002200}
2201
2202static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2203 struct netlink_ext_ack *extack)
2204{
2205 struct net *net = sock_net(skb->sk);
2206 struct nlattr *tca[TCA_MAX + 1];
2207 struct tcmsg *t;
2208 u32 protocol;
2209 u32 prio;
2210 u32 parent;
2211 u32 chain_index;
2212 struct Qdisc *q = NULL;
2213 struct tcf_chain_info chain_info;
2214 struct tcf_chain *chain = NULL;
Vlad Buslov470502d2019-02-11 10:55:48 +02002215 struct tcf_block *block = NULL;
Vlad Buslovc431f892018-05-31 09:52:53 +03002216 struct tcf_proto *tp = NULL;
2217 unsigned long cl = 0;
2218 void *fh = NULL;
2219 int err;
Vlad Buslov470502d2019-02-11 10:55:48 +02002220 bool rtnl_held = false;
Vlad Buslovc431f892018-05-31 09:52:53 +03002221
Johannes Berg8cb08172019-04-26 14:07:28 +02002222 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2223 rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002224 if (err < 0)
2225 return err;
2226
2227 t = nlmsg_data(n);
2228 protocol = TC_H_MIN(t->tcm_info);
2229 prio = TC_H_MAJ(t->tcm_info);
2230 parent = t->tcm_parent;
2231
2232 if (prio == 0) {
2233 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2234 return -ENOENT;
2235 }
2236
2237 /* Find head of filter chain. */
2238
Vlad Buslov470502d2019-02-11 10:55:48 +02002239 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2240 if (err)
2241 return err;
2242
2243 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
2244 * unlocked, classifier type is not specified, classifier is not
2245 * unlocked.
2246 */
2247 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2248 !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2249 rtnl_held = true;
2250 rtnl_lock();
2251 }
2252
2253 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2254 if (err)
2255 goto errout;
2256
2257 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2258 extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002259 if (IS_ERR(block)) {
2260 err = PTR_ERR(block);
2261 goto errout;
2262 }
2263
2264 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2265 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2266 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2267 err = -EINVAL;
2268 goto errout;
2269 }
2270 chain = tcf_chain_get(block, chain_index, false);
2271 if (!chain) {
2272 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2273 err = -EINVAL;
2274 goto errout;
2275 }
2276
Vlad Busloved76f5e2019-02-11 10:55:38 +02002277 mutex_lock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002278 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2279 prio, false);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002280 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002281 if (!tp || IS_ERR(tp)) {
2282 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03002283 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002284 goto errout;
2285 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2286 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2287 err = -EINVAL;
2288 goto errout;
2289 }
2290
2291 fh = tp->ops->get(tp, t->tcm_handle);
2292
2293 if (!fh) {
2294 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2295 err = -ENOENT;
2296 } else {
2297 err = tfilter_notify(net, skb, n, tp, block, q, parent,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002298 fh, RTM_NEWTFILTER, true, rtnl_held);
Vlad Buslovc431f892018-05-31 09:52:53 +03002299 if (err < 0)
2300 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2301 }
2302
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002303 tfilter_put(tp, fh);
Vlad Buslovc431f892018-05-31 09:52:53 +03002304errout:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002305 if (chain) {
2306 if (tp && !IS_ERR(tp))
Vlad Buslov12db03b2019-02-11 10:55:45 +02002307 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Buslovc431f892018-05-31 09:52:53 +03002308 tcf_chain_put(chain);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002309 }
Vlad Buslov12db03b2019-02-11 10:55:45 +02002310 tcf_block_release(q, block, rtnl_held);
Vlad Buslov470502d2019-02-11 10:55:48 +02002311
2312 if (rtnl_held)
2313 rtnl_unlock();
2314
Vlad Buslovc431f892018-05-31 09:52:53 +03002315 return err;
2316}
2317
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002318struct tcf_dump_args {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 struct tcf_walker w;
2320 struct sk_buff *skb;
2321 struct netlink_callback *cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002322 struct tcf_block *block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002323 struct Qdisc *q;
2324 u32 parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325};
2326
WANG Cong8113c092017-08-04 21:31:43 -07002327static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328{
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002329 struct tcf_dump_args *a = (void *)arg;
WANG Cong832d1d52014-01-09 16:14:01 -08002330 struct net *net = sock_net(a->skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002332 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002333 n, NETLINK_CB(a->cb->skb).portid,
Jamal Hadi Salim5a7a5552016-09-18 08:45:33 -04002334 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002335 RTM_NEWTFILTER, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336}
2337
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002338static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2339 struct sk_buff *skb, struct netlink_callback *cb,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002340 long index_start, long *p_index)
2341{
2342 struct net *net = sock_net(skb->sk);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002343 struct tcf_block *block = chain->block;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002344 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002345 struct tcf_proto *tp, *tp_prev;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002346 struct tcf_dump_args arg;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002347
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002348 for (tp = __tcf_get_next_proto(chain, NULL);
2349 tp;
2350 tp_prev = tp,
2351 tp = __tcf_get_next_proto(chain, tp),
Vlad Buslov12db03b2019-02-11 10:55:45 +02002352 tcf_proto_put(tp_prev, true, NULL),
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002353 (*p_index)++) {
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002354 if (*p_index < index_start)
2355 continue;
2356 if (TC_H_MAJ(tcm->tcm_info) &&
2357 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2358 continue;
2359 if (TC_H_MIN(tcm->tcm_info) &&
2360 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2361 continue;
2362 if (*p_index > index_start)
2363 memset(&cb->args[1], 0,
2364 sizeof(cb->args) - sizeof(cb->args[0]));
2365 if (cb->args[1] == 0) {
YueHaibing53189182018-07-17 20:58:14 +08002366 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002367 NETLINK_CB(cb->skb).portid,
2368 cb->nlh->nlmsg_seq, NLM_F_MULTI,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002369 RTM_NEWTFILTER, true) <= 0)
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002370 goto errout;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002371 cb->args[1] = 1;
2372 }
2373 if (!tp->ops->walk)
2374 continue;
2375 arg.w.fn = tcf_node_dump;
2376 arg.skb = skb;
2377 arg.cb = cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002378 arg.block = block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002379 arg.q = q;
2380 arg.parent = parent;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002381 arg.w.stop = 0;
2382 arg.w.skip = cb->args[1] - 1;
2383 arg.w.count = 0;
Vlad Buslov01683a12018-07-09 13:29:11 +03002384 arg.w.cookie = cb->args[2];
Vlad Buslov12db03b2019-02-11 10:55:45 +02002385 tp->ops->walk(tp, &arg.w, true);
Vlad Buslov01683a12018-07-09 13:29:11 +03002386 cb->args[2] = arg.w.cookie;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002387 cb->args[1] = arg.w.count + 1;
2388 if (arg.w.stop)
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002389 goto errout;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002390 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002391 return true;
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002392
2393errout:
Vlad Buslov12db03b2019-02-11 10:55:45 +02002394 tcf_proto_put(tp, true, NULL);
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002395 return false;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002396}
2397
Eric Dumazetbd27a872009-11-05 20:57:26 -08002398/* called with RTNL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2400{
Vlad Buslovbbf73832019-02-11 10:55:36 +02002401 struct tcf_chain *chain, *chain_prev;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002402 struct net *net = sock_net(skb->sk);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002403 struct nlattr *tca[TCA_MAX + 1];
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002404 struct Qdisc *q = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02002405 struct tcf_block *block;
David S. Miller942b8162012-06-26 21:48:50 -07002406 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002407 long index_start;
2408 long index;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002409 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002410 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
Hong zhi guo573ce262013-03-27 06:47:04 +00002412 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 return skb->len;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002414
Johannes Berg8cb08172019-04-26 14:07:28 +02002415 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2416 NULL, cb->extack);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002417 if (err)
2418 return err;
2419
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002420 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002421 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002422 if (!block)
WANG Cong143976c2017-08-24 16:51:29 -07002423 goto out;
Jiri Pirkod680b352018-01-18 16:14:49 +01002424 /* If we work with block index, q is NULL and parent value
2425 * will never be used in the following code. The check
2426 * in tcf_fill_node prevents it. However, compiler does not
2427 * see that far, so set parent to zero to silence the warning
2428 * about parent being uninitialized.
2429 */
2430 parent = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002431 } else {
2432 const struct Qdisc_class_ops *cops;
2433 struct net_device *dev;
2434 unsigned long cl = 0;
2435
2436 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2437 if (!dev)
2438 return skb->len;
2439
2440 parent = tcm->tcm_parent;
2441 if (!parent) {
2442 q = dev->qdisc;
2443 parent = q->handle;
2444 } else {
2445 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2446 }
2447 if (!q)
2448 goto out;
2449 cops = q->ops->cl_ops;
2450 if (!cops)
2451 goto out;
2452 if (!cops->tcf_block)
2453 goto out;
2454 if (TC_H_MIN(tcm->tcm_parent)) {
2455 cl = cops->find(q, tcm->tcm_parent);
2456 if (cl == 0)
2457 goto out;
2458 }
2459 block = cops->tcf_block(q, cl, NULL);
2460 if (!block)
2461 goto out;
2462 if (tcf_block_shared(block))
2463 q = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002466 index_start = cb->args[0];
2467 index = 0;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002468
Vlad Buslovbbf73832019-02-11 10:55:36 +02002469 for (chain = __tcf_get_next_chain(block, NULL);
2470 chain;
2471 chain_prev = chain,
2472 chain = __tcf_get_next_chain(block, chain),
2473 tcf_chain_put(chain_prev)) {
Jiri Pirko5bc17012017-05-17 11:08:01 +02002474 if (tca[TCA_CHAIN] &&
2475 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2476 continue;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002477 if (!tcf_chain_dump(chain, q, parent, skb, cb,
Roman Kapl5ae437a2018-02-19 21:32:51 +01002478 index_start, &index)) {
Vlad Buslovbbf73832019-02-11 10:55:36 +02002479 tcf_chain_put(chain);
Roman Kapl5ae437a2018-02-19 21:32:51 +01002480 err = -EMSGSIZE;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002481 break;
Roman Kapl5ae437a2018-02-19 21:32:51 +01002482 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002483 }
2484
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002485 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002486 tcf_block_refcnt_put(block, true);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002487 cb->args[0] = index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489out:
Roman Kapl5ae437a2018-02-19 21:32:51 +01002490 /* If we made no progress, the error (EMSGSIZE) is real */
2491 if (skb->len == 0 && err)
2492 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 return skb->len;
2494}
2495
Vlad Buslova5654822019-02-11 10:55:37 +02002496static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2497 void *tmplt_priv, u32 chain_index,
2498 struct net *net, struct sk_buff *skb,
2499 struct tcf_block *block,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002500 u32 portid, u32 seq, u16 flags, int event)
2501{
2502 unsigned char *b = skb_tail_pointer(skb);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002503 const struct tcf_proto_ops *ops;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002504 struct nlmsghdr *nlh;
2505 struct tcmsg *tcm;
Jiri Pirko9f407f12018-07-23 09:23:07 +02002506 void *priv;
2507
Vlad Buslova5654822019-02-11 10:55:37 +02002508 ops = tmplt_ops;
2509 priv = tmplt_priv;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002510
2511 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2512 if (!nlh)
2513 goto out_nlmsg_trim;
2514 tcm = nlmsg_data(nlh);
2515 tcm->tcm_family = AF_UNSPEC;
2516 tcm->tcm__pad1 = 0;
2517 tcm->tcm__pad2 = 0;
2518 tcm->tcm_handle = 0;
2519 if (block->q) {
2520 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2521 tcm->tcm_parent = block->q->handle;
2522 } else {
2523 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2524 tcm->tcm_block_index = block->index;
2525 }
2526
Vlad Buslova5654822019-02-11 10:55:37 +02002527 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002528 goto nla_put_failure;
2529
Jiri Pirko9f407f12018-07-23 09:23:07 +02002530 if (ops) {
2531 if (nla_put_string(skb, TCA_KIND, ops->kind))
2532 goto nla_put_failure;
2533 if (ops->tmplt_dump(skb, net, priv) < 0)
2534 goto nla_put_failure;
2535 }
2536
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002537 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2538 return skb->len;
2539
2540out_nlmsg_trim:
2541nla_put_failure:
2542 nlmsg_trim(skb, b);
2543 return -EMSGSIZE;
2544}
2545
2546static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2547 u32 seq, u16 flags, int event, bool unicast)
2548{
2549 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2550 struct tcf_block *block = chain->block;
2551 struct net *net = block->net;
2552 struct sk_buff *skb;
Zhike Wang5b5f99b2019-03-11 03:15:54 -07002553 int err = 0;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002554
2555 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2556 if (!skb)
2557 return -ENOBUFS;
2558
Vlad Buslova5654822019-02-11 10:55:37 +02002559 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2560 chain->index, net, skb, block, portid,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002561 seq, flags, event) <= 0) {
2562 kfree_skb(skb);
2563 return -EINVAL;
2564 }
2565
2566 if (unicast)
Zhike Wang5b5f99b2019-03-11 03:15:54 -07002567 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2568 else
2569 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2570 flags & NLM_F_ECHO);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002571
Zhike Wang5b5f99b2019-03-11 03:15:54 -07002572 if (err > 0)
2573 err = 0;
2574 return err;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002575}
2576
Vlad Buslova5654822019-02-11 10:55:37 +02002577static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2578 void *tmplt_priv, u32 chain_index,
2579 struct tcf_block *block, struct sk_buff *oskb,
2580 u32 seq, u16 flags, bool unicast)
2581{
2582 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2583 struct net *net = block->net;
2584 struct sk_buff *skb;
2585
2586 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2587 if (!skb)
2588 return -ENOBUFS;
2589
2590 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2591 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2592 kfree_skb(skb);
2593 return -EINVAL;
2594 }
2595
2596 if (unicast)
2597 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2598
2599 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2600}
2601
Jiri Pirko9f407f12018-07-23 09:23:07 +02002602static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2603 struct nlattr **tca,
2604 struct netlink_ext_ack *extack)
2605{
2606 const struct tcf_proto_ops *ops;
2607 void *tmplt_priv;
2608
2609 /* If kind is not set, user did not specify template. */
2610 if (!tca[TCA_KIND])
2611 return 0;
2612
Vlad Buslov12db03b2019-02-11 10:55:45 +02002613 ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002614 if (IS_ERR(ops))
2615 return PTR_ERR(ops);
2616 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2617 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2618 return -EOPNOTSUPP;
2619 }
2620
2621 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2622 if (IS_ERR(tmplt_priv)) {
2623 module_put(ops->owner);
2624 return PTR_ERR(tmplt_priv);
2625 }
2626 chain->tmplt_ops = ops;
2627 chain->tmplt_priv = tmplt_priv;
2628 return 0;
2629}
2630
Vlad Buslova5654822019-02-11 10:55:37 +02002631static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2632 void *tmplt_priv)
Jiri Pirko9f407f12018-07-23 09:23:07 +02002633{
Jiri Pirko9f407f12018-07-23 09:23:07 +02002634	/* If template ops are not set, there is no template to destroy. */
Vlad Buslova5654822019-02-11 10:55:37 +02002635 if (!tmplt_ops)
Jiri Pirko9f407f12018-07-23 09:23:07 +02002636 return;
2637
Vlad Buslova5654822019-02-11 10:55:37 +02002638 tmplt_ops->tmplt_destroy(tmplt_priv);
2639 module_put(tmplt_ops->owner);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002640}
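
/* Illustrative sketch, not part of this file: the tmplt_create/tmplt_destroy/
 * tmplt_dump callbacks used above are supplied by classifiers that support
 * chain templates through their struct tcf_proto_ops. The "foo_*" names below
 * are assumptions for illustration only (in-tree, cls_flower provides such
 * ops):
 *
 *	static void *foo_tmplt_create(struct net *net, struct tcf_chain *chain,
 *				      struct nlattr **tca,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		// parse tca[], allocate and return the template private data
 *	}
 *
 *	static void foo_tmplt_destroy(void *tmplt_priv)
 *	{
 *		// free whatever foo_tmplt_create() allocated
 *	}
 *
 *	static int foo_tmplt_dump(struct sk_buff *skb, struct net *net,
 *				  void *tmplt_priv)
 *	{
 *		// put netlink attributes describing the template, 0 or -error
 *	}
 *
 *	static struct tcf_proto_ops foo_ops __read_mostly = {
 *		.tmplt_create	= foo_tmplt_create,
 *		.tmplt_destroy	= foo_tmplt_destroy,
 *		.tmplt_dump	= foo_tmplt_dump,
 *		...
 *	};
 */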
2641
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002642/* Add/delete/get a chain */
2643
2644static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2645 struct netlink_ext_ack *extack)
2646{
2647 struct net *net = sock_net(skb->sk);
2648 struct nlattr *tca[TCA_MAX + 1];
2649 struct tcmsg *t;
2650 u32 parent;
2651 u32 chain_index;
2652 struct Qdisc *q = NULL;
2653 struct tcf_chain *chain = NULL;
2654 struct tcf_block *block;
2655 unsigned long cl;
2656 int err;
2657
2658 if (n->nlmsg_type != RTM_GETCHAIN &&
2659 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2660 return -EPERM;
2661
2662replay:
Johannes Berg8cb08172019-04-26 14:07:28 +02002663 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2664 rtm_tca_policy, extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002665 if (err < 0)
2666 return err;
2667
2668 t = nlmsg_data(n);
2669 parent = t->tcm_parent;
2670 cl = 0;
2671
2672 block = tcf_block_find(net, &q, &parent, &cl,
2673 t->tcm_ifindex, t->tcm_block_index, extack);
2674 if (IS_ERR(block))
2675 return PTR_ERR(block);
2676
2677 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2678 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2679 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002680 err = -EINVAL;
2681 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002682 }
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002683
2684 mutex_lock(&block->lock);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002685 chain = tcf_chain_lookup(block, chain_index);
2686 if (n->nlmsg_type == RTM_NEWCHAIN) {
2687 if (chain) {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002688 if (tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002689 /* The chain exists only because there is
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002690 * some action referencing it.
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002691 */
2692 tcf_chain_hold(chain);
2693 } else {
2694 NL_SET_ERR_MSG(extack, "Filter chain already exists");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002695 err = -EEXIST;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002696 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002697 }
2698 } else {
2699 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2700 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002701 err = -ENOENT;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002702 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002703 }
2704 chain = tcf_chain_create(block, chain_index);
2705 if (!chain) {
2706 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002707 err = -ENOMEM;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002708 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002709 }
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002710 }
2711 } else {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002712 if (!chain || tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002713 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002714 err = -EINVAL;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002715 goto errout_block_locked;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002716 }
2717 tcf_chain_hold(chain);
2718 }
2719
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002720 if (n->nlmsg_type == RTM_NEWCHAIN) {
2721		/* Modifying a chain requires holding the parent block lock. In case
2722		 * the chain was successfully added, take a reference to the
2723		 * chain. This ensures that an empty chain does not disappear
2724		 * before the end of this function.
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002725 */
2726 tcf_chain_hold(chain);
2727 chain->explicitly_created = true;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002728 }
2729 mutex_unlock(&block->lock);
2730
2731 switch (n->nlmsg_type) {
2732 case RTM_NEWCHAIN:
2733 err = tc_chain_tmplt_add(chain, net, tca, extack);
2734 if (err) {
2735 tcf_chain_put_explicitly_created(chain);
2736 goto errout;
2737 }
2738
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002739 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2740 RTM_NEWCHAIN, false);
2741 break;
2742 case RTM_DELCHAIN:
Cong Wangf5b9bac2018-09-11 14:22:23 -07002743 tfilter_notify_chain(net, skb, block, q, parent, n,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002744 chain, RTM_DELTFILTER, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002745 /* Flush the chain first as the user requested chain removal. */
Vlad Buslov12db03b2019-02-11 10:55:45 +02002746 tcf_chain_flush(chain, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002747 /* In case the chain was successfully deleted, put a reference
2748 * to the chain previously taken during addition.
2749 */
2750 tcf_chain_put_explicitly_created(chain);
2751 break;
2752 case RTM_GETCHAIN:
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002753 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2754 n->nlmsg_seq, n->nlmsg_type, true);
2755 if (err < 0)
2756 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2757 break;
2758 default:
2759 err = -EOPNOTSUPP;
2760 NL_SET_ERR_MSG(extack, "Unsupported message type");
2761 goto errout;
2762 }
2763
2764errout:
2765 tcf_chain_put(chain);
Vlad Buslove368fdb2018-09-24 19:22:53 +03002766errout_block:
Vlad Buslov12db03b2019-02-11 10:55:45 +02002767 tcf_block_release(q, block, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002768 if (err == -EAGAIN)
2769 /* Replay the request. */
2770 goto replay;
2771 return err;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002772
2773errout_block_locked:
2774 mutex_unlock(&block->lock);
2775 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002776}
2777
2778/* called with RTNL */
2779static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2780{
2781 struct net *net = sock_net(skb->sk);
2782 struct nlattr *tca[TCA_MAX + 1];
2783 struct Qdisc *q = NULL;
2784 struct tcf_block *block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002785 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Vlad Buslovace4a262019-02-25 17:45:44 +02002786 struct tcf_chain *chain;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002787 long index_start;
2788 long index;
2789 u32 parent;
2790 int err;
2791
2792 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2793 return skb->len;
2794
Johannes Berg8cb08172019-04-26 14:07:28 +02002795 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2796 rtm_tca_policy, cb->extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002797 if (err)
2798 return err;
2799
2800 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002801 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002802 if (!block)
2803 goto out;
2804		/* If we work with a block index, q is NULL and the parent value
2805		 * will never be used in the following code. The check
2806		 * in tcf_fill_node prevents it. However, the compiler does not
2807		 * see that far, so set parent to zero to silence the warning
2808		 * about parent being uninitialized.
2809 */
2810 parent = 0;
2811 } else {
2812 const struct Qdisc_class_ops *cops;
2813 struct net_device *dev;
2814 unsigned long cl = 0;
2815
2816 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2817 if (!dev)
2818 return skb->len;
2819
2820 parent = tcm->tcm_parent;
2821 if (!parent) {
2822 q = dev->qdisc;
2823 parent = q->handle;
2824 } else {
2825 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2826 }
2827 if (!q)
2828 goto out;
2829 cops = q->ops->cl_ops;
2830 if (!cops)
2831 goto out;
2832 if (!cops->tcf_block)
2833 goto out;
2834 if (TC_H_MIN(tcm->tcm_parent)) {
2835 cl = cops->find(q, tcm->tcm_parent);
2836 if (cl == 0)
2837 goto out;
2838 }
2839 block = cops->tcf_block(q, cl, NULL);
2840 if (!block)
2841 goto out;
2842 if (tcf_block_shared(block))
2843 q = NULL;
2844 }
2845
2846 index_start = cb->args[0];
2847 index = 0;
2848
Vlad Buslovace4a262019-02-25 17:45:44 +02002849 mutex_lock(&block->lock);
2850 list_for_each_entry(chain, &block->chain_list, list) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002851 if ((tca[TCA_CHAIN] &&
2852 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2853 continue;
2854 if (index < index_start) {
2855 index++;
2856 continue;
2857 }
Vlad Buslovace4a262019-02-25 17:45:44 +02002858 if (tcf_chain_held_by_acts_only(chain))
2859 continue;
Vlad Buslova5654822019-02-11 10:55:37 +02002860 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2861 chain->index, net, skb, block,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002862 NETLINK_CB(cb->skb).portid,
2863 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2864 RTM_NEWCHAIN);
Vlad Buslovace4a262019-02-25 17:45:44 +02002865 if (err <= 0)
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002866 break;
2867 index++;
2868 }
Vlad Buslovace4a262019-02-25 17:45:44 +02002869 mutex_unlock(&block->lock);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002870
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002871 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002872 tcf_block_refcnt_put(block, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002873 cb->args[0] = index;
2874
2875out:
2876	/* If we made no progress, the error (EMSGSIZE) is real */
2877 if (skb->len == 0 && err)
2878 return err;
2879 return skb->len;
2880}
2881
WANG Cong18d02642014-09-25 10:26:37 -07002882void tcf_exts_destroy(struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883{
2884#ifdef CONFIG_NET_CLS_ACT
Vlad Buslov90b73b72018-07-05 17:24:33 +03002885 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
WANG Cong22dc13c2016-08-13 22:35:00 -07002886 kfree(exts->actions);
2887 exts->nr_actions = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888#endif
2889}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002890EXPORT_SYMBOL(tcf_exts_destroy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891
Benjamin LaHaisec1b52732013-01-14 05:15:39 +00002892int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
Alexander Aring50a56192018-01-18 11:20:52 -05002893 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
Vlad Buslovec6743a2019-02-11 10:55:43 +02002894 bool rtnl_held, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896#ifdef CONFIG_NET_CLS_ACT
2897 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 struct tc_action *act;
Roman Mashakd04e6992018-03-08 16:59:17 -05002899 size_t attr_size = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
WANG Cong5da57f42013-12-15 20:15:07 -08002901 if (exts->police && tb[exts->police]) {
Jiri Pirko9fb9f252017-05-17 11:08:02 +02002902 act = tcf_action_init_1(net, tp, tb[exts->police],
2903 rate_tlv, "police", ovr,
Vlad Buslovec6743a2019-02-11 10:55:43 +02002904 TCA_ACT_BIND, rtnl_held,
2905 extack);
Patrick McHardyab27cfb2008-01-23 20:33:13 -08002906 if (IS_ERR(act))
2907 return PTR_ERR(act);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908
WANG Cong33be6272013-12-15 20:15:05 -08002909 act->type = exts->type = TCA_OLD_COMPAT;
WANG Cong22dc13c2016-08-13 22:35:00 -07002910 exts->actions[0] = act;
2911 exts->nr_actions = 1;
WANG Cong5da57f42013-12-15 20:15:07 -08002912 } else if (exts->action && tb[exts->action]) {
Vlad Buslov90b73b72018-07-05 17:24:33 +03002913 int err;
WANG Cong22dc13c2016-08-13 22:35:00 -07002914
Jiri Pirko9fb9f252017-05-17 11:08:02 +02002915 err = tcf_action_init(net, tp, tb[exts->action],
2916 rate_tlv, NULL, ovr, TCA_ACT_BIND,
Vlad Buslovec6743a2019-02-11 10:55:43 +02002917 exts->actions, &attr_size,
2918 rtnl_held, extack);
Vlad Buslov90b73b72018-07-05 17:24:33 +03002919 if (err < 0)
WANG Cong33be6272013-12-15 20:15:05 -08002920 return err;
Vlad Buslov90b73b72018-07-05 17:24:33 +03002921 exts->nr_actions = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 }
2923 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924#else
WANG Cong5da57f42013-12-15 20:15:07 -08002925 if ((exts->action && tb[exts->action]) ||
Alexander Aring50a56192018-01-18 11:20:52 -05002926 (exts->police && tb[exts->police])) {
2927 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 return -EOPNOTSUPP;
Alexander Aring50a56192018-01-18 11:20:52 -05002929 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930#endif
2931
2932 return 0;
2933}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002934EXPORT_SYMBOL(tcf_exts_validate);
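
/* Hedged usage sketch, not lifted verbatim from any classifier: a typical
 * ->change() implementation validates the action attributes it parsed into
 * tb[] through tcf_exts_validate() before touching its filter. "n" and its
 * "exts" member are assumptions standing in for a classifier's private filter
 * struct:
 *
 *	err = tcf_exts_validate(net, tp, tb, rate_tlv, &n->exts, ovr,
 *				rtnl_held, extack);
 *	if (err < 0)
 *		return err;
 */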
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935
Jiri Pirko9b0d4442017-08-04 14:29:15 +02002936void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937{
2938#ifdef CONFIG_NET_CLS_ACT
WANG Cong22dc13c2016-08-13 22:35:00 -07002939 struct tcf_exts old = *dst;
2940
Jiri Pirko9b0d4442017-08-04 14:29:15 +02002941 *dst = *src;
WANG Cong22dc13c2016-08-13 22:35:00 -07002942 tcf_exts_destroy(&old);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943#endif
2944}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002945EXPORT_SYMBOL(tcf_exts_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946
WANG Cong22dc13c2016-08-13 22:35:00 -07002947#ifdef CONFIG_NET_CLS_ACT
2948static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
2949{
2950 if (exts->nr_actions == 0)
2951 return NULL;
2952 else
2953 return exts->actions[0];
2954}
2955#endif
WANG Cong33be6272013-12-15 20:15:05 -08002956
WANG Cong5da57f42013-12-15 20:15:07 -08002957int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958{
2959#ifdef CONFIG_NET_CLS_ACT
Cong Wang9cc63db2014-07-16 14:25:30 -07002960 struct nlattr *nest;
2961
Jiri Pirko978dfd82017-08-04 14:29:03 +02002962 if (exts->action && tcf_exts_has_actions(exts)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 /*
2964		 * again for backward-compatible mode - we want
2965		 * to work with both old and new modes of entering
2966		 * tc data even if iproute2 is newer - jhs
2967 */
WANG Cong33be6272013-12-15 20:15:05 -08002968 if (exts->type != TCA_OLD_COMPAT) {
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002969 nest = nla_nest_start_noflag(skb, exts->action);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002970 if (nest == NULL)
2971 goto nla_put_failure;
WANG Cong22dc13c2016-08-13 22:35:00 -07002972
Vlad Buslov90b73b72018-07-05 17:24:33 +03002973 if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -08002974 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002975 nla_nest_end(skb, nest);
WANG Cong5da57f42013-12-15 20:15:07 -08002976 } else if (exts->police) {
WANG Cong33be6272013-12-15 20:15:05 -08002977 struct tc_action *act = tcf_exts_first_act(exts);
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002978 nest = nla_nest_start_noflag(skb, exts->police);
Jamal Hadi Salim63acd682013-12-23 08:02:12 -05002979 if (nest == NULL || !act)
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002980 goto nla_put_failure;
WANG Cong33be6272013-12-15 20:15:05 -08002981 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -08002982 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002983 nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 }
2985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 return 0;
Cong Wang9cc63db2014-07-16 14:25:30 -07002987
2988nla_put_failure:
2989 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 return -1;
Cong Wang9cc63db2014-07-16 14:25:30 -07002991#else
2992 return 0;
2993#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002995EXPORT_SYMBOL(tcf_exts_dump);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002997
WANG Cong5da57f42013-12-15 20:15:07 -08002998int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999{
3000#ifdef CONFIG_NET_CLS_ACT
WANG Cong33be6272013-12-15 20:15:05 -08003001 struct tc_action *a = tcf_exts_first_act(exts);
Ignacy Gawędzkib057df22015-02-03 19:05:18 +01003002 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
WANG Cong33be6272013-12-15 20:15:05 -08003003 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004#endif
3005 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08003007EXPORT_SYMBOL(tcf_exts_dump_stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008
Vlad Buslov40119212019-08-26 16:44:59 +03003009static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3010{
3011 if (*flags & TCA_CLS_FLAGS_IN_HW)
3012 return;
3013 *flags |= TCA_CLS_FLAGS_IN_HW;
3014 atomic_inc(&block->offloadcnt);
3015}
3016
3017static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3018{
3019 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3020 return;
3021 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3022 atomic_dec(&block->offloadcnt);
3023}
3024
3025static void tc_cls_offload_cnt_update(struct tcf_block *block,
3026 struct tcf_proto *tp, u32 *cnt,
3027 u32 *flags, u32 diff, bool add)
3028{
3029 lockdep_assert_held(&block->cb_lock);
3030
3031 spin_lock(&tp->lock);
3032 if (add) {
3033 if (!*cnt)
3034 tcf_block_offload_inc(block, flags);
3035 *cnt += diff;
3036 } else {
3037 *cnt -= diff;
3038 if (!*cnt)
3039 tcf_block_offload_dec(block, flags);
3040 }
3041 spin_unlock(&tp->lock);
3042}
3043
3044static void
3045tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3046 u32 *cnt, u32 *flags)
3047{
3048 lockdep_assert_held(&block->cb_lock);
3049
3050 spin_lock(&tp->lock);
3051 tcf_block_offload_dec(block, flags);
3052 *cnt = 0;
3053 spin_unlock(&tp->lock);
3054}
3055
3056static int
3057__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3058 void *type_data, bool err_stop)
Jiri Pirko717503b2017-10-11 09:41:09 +02003059{
Pablo Neira Ayuso955bcb62019-07-09 22:55:46 +02003060 struct flow_block_cb *block_cb;
Cong Wangaeb3fec2018-12-11 11:15:46 -08003061 int ok_count = 0;
3062 int err;
3063
Vlad Buslov40119212019-08-26 16:44:59 +03003064 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3065 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3066 if (err) {
3067 if (err_stop)
3068 return err;
3069 } else {
3070 ok_count++;
3071 }
3072 }
3073 return ok_count;
3074}
3075
3076int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3077 void *type_data, bool err_stop, bool rtnl_held)
3078{
3079 int ok_count;
3080
3081 down_read(&block->cb_lock);
3082 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3083 up_read(&block->cb_lock);
3084 return ok_count;
3085}
3086EXPORT_SYMBOL(tc_setup_cb_call);
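
/* Hedged usage sketch: tc_setup_cb_call() is the variant that leaves the
 * offload counters alone, so it suits calls such as a stats refresh. The
 * flower-style variables below ("cls_flower", "block", "rtnl_held") are
 * assumptions for illustration:
 *
 *	cls_flower.command = FLOW_CLS_STATS;
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 *			 rtnl_held);
 */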
3087
3088/* Non-destructive filter add. If a filter that wasn't already in hardware is
3089 * successfully offloaded, increment the block offload counter. On failure, the
3090 * previously offloaded filter is considered intact and the offload counter is
3091 * not decremented.
3092 */
3093
3094int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3095 enum tc_setup_type type, void *type_data, bool err_stop,
3096 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3097{
3098 int ok_count;
3099
Vlad Buslov4f8116c2019-08-26 16:44:57 +03003100 down_read(&block->cb_lock);
Cong Wangaeb3fec2018-12-11 11:15:46 -08003101 /* Make sure all netdevs sharing this block are offload-capable. */
Vlad Buslov4f8116c2019-08-26 16:44:57 +03003102 if (block->nooffloaddevcnt && err_stop) {
3103 ok_count = -EOPNOTSUPP;
3104 goto err_unlock;
3105 }
Cong Wangaeb3fec2018-12-11 11:15:46 -08003106
Vlad Buslov40119212019-08-26 16:44:59 +03003107 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003108 if (ok_count < 0)
3109 goto err_unlock;
3110
3111 if (tp->ops->hw_add)
3112 tp->ops->hw_add(tp, type_data);
Vlad Buslov40119212019-08-26 16:44:59 +03003113 if (ok_count > 0)
3114 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3115 ok_count, true);
Vlad Buslov4f8116c2019-08-26 16:44:57 +03003116err_unlock:
3117 up_read(&block->cb_lock);
Vlad Buslov40119212019-08-26 16:44:59 +03003118 return ok_count < 0 ? ok_count : 0;
Jiri Pirko717503b2017-10-11 09:41:09 +02003119}
Vlad Buslov40119212019-08-26 16:44:59 +03003120EXPORT_SYMBOL(tc_setup_cb_add);
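
/* Hedged usage sketch: a classifier offloading a new filter hands its
 * hardware-facing descriptor to tc_setup_cb_add() and lets the helper manage
 * the per-filter in_hw_count and the block offload counter. The "f" filter
 * struct and "skip_sw" flag are assumptions modelled on flower-like
 * classifiers; tc_setup_cb_replace() and tc_setup_cb_destroy() below are
 * driven the same way for updates and removals:
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err < 0)
 *		goto errout;
 */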
3121
3122/* Destructive filter replace. If a filter that wasn't already in hardware is
3123 * successfully offloaded, increment the block offload counter. On failure, the
3124 * previously offloaded filter is considered destroyed and the offload counter is
3125 * decremented.
3126 */
3127
3128int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3129 enum tc_setup_type type, void *type_data, bool err_stop,
3130 u32 *old_flags, unsigned int *old_in_hw_count,
3131 u32 *new_flags, unsigned int *new_in_hw_count,
3132 bool rtnl_held)
3133{
3134 int ok_count;
3135
3136 down_read(&block->cb_lock);
3137 /* Make sure all netdevs sharing this block are offload-capable. */
3138 if (block->nooffloaddevcnt && err_stop) {
3139 ok_count = -EOPNOTSUPP;
3140 goto err_unlock;
3141 }
3142
3143 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003144 if (tp->ops->hw_del)
3145 tp->ops->hw_del(tp, type_data);
Vlad Buslov40119212019-08-26 16:44:59 +03003146
3147 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003148 if (ok_count < 0)
3149 goto err_unlock;
3150
3151 if (tp->ops->hw_add)
3152 tp->ops->hw_add(tp, type_data);
Vlad Buslov40119212019-08-26 16:44:59 +03003153 if (ok_count > 0)
Vlad Buslova449a3e2019-08-26 16:45:00 +03003154 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3155 new_flags, ok_count, true);
Vlad Buslov40119212019-08-26 16:44:59 +03003156err_unlock:
3157 up_read(&block->cb_lock);
3158 return ok_count < 0 ? ok_count : 0;
3159}
3160EXPORT_SYMBOL(tc_setup_cb_replace);
3161
3162/* Destroy the filter and decrement the block offload counter, if the filter was
3163 * previously offloaded.
3164 */
3165
3166int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3167 enum tc_setup_type type, void *type_data, bool err_stop,
3168 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3169{
3170 int ok_count;
3171
3172 down_read(&block->cb_lock);
3173 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3174
3175 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003176 if (tp->ops->hw_del)
3177 tp->ops->hw_del(tp, type_data);
3178
Vlad Buslov40119212019-08-26 16:44:59 +03003179 up_read(&block->cb_lock);
3180 return ok_count < 0 ? ok_count : 0;
3181}
3182EXPORT_SYMBOL(tc_setup_cb_destroy);
3183
3184int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3185 bool add, flow_setup_cb_t *cb,
3186 enum tc_setup_type type, void *type_data,
3187 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3188{
3189 int err = cb(type, type_data, cb_priv);
3190
3191 if (err) {
3192 if (add && tc_skip_sw(*flags))
3193 return err;
3194 } else {
3195 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3196 add);
3197 }
3198
3199 return 0;
3200}
3201EXPORT_SYMBOL(tc_setup_cb_reoffload);
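
/* Hedged usage sketch: tc_setup_cb_reoffload() is intended to be called from a
 * classifier's ->reoffload() callback, once per existing filter, whenever a
 * block callback is added or removed. The loop below is an assumption sketched
 * after flower-style reoffload code; "f" again stands in for a private filter
 * struct:
 *
 *	list_for_each_entry(f, &head->filters, list) {
 *		...
 *		err = tc_setup_cb_reoffload(block, tp, add, cb,
 *					    TC_SETUP_CLSFLOWER, &cls_flower,
 *					    cb_priv, &f->flags,
 *					    &f->in_hw_count);
 *		if (err)
 *			return err;
 *	}
 */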
Jiri Pirkob3f55bd2017-10-11 09:41:08 +02003202
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003203int tc_setup_flow_action(struct flow_action *flow_action,
3204 const struct tcf_exts *exts)
3205{
3206 const struct tc_action *act;
3207 int i, j, k;
3208
3209 if (!exts)
3210 return 0;
3211
3212 j = 0;
3213 tcf_exts_for_each_action(i, act, exts) {
3214 struct flow_action_entry *entry;
3215
3216 entry = &flow_action->entries[j];
3217 if (is_tcf_gact_ok(act)) {
3218 entry->id = FLOW_ACTION_ACCEPT;
3219 } else if (is_tcf_gact_shot(act)) {
3220 entry->id = FLOW_ACTION_DROP;
3221 } else if (is_tcf_gact_trap(act)) {
3222 entry->id = FLOW_ACTION_TRAP;
3223 } else if (is_tcf_gact_goto_chain(act)) {
3224 entry->id = FLOW_ACTION_GOTO;
3225 entry->chain_index = tcf_gact_goto_chain_index(act);
3226 } else if (is_tcf_mirred_egress_redirect(act)) {
3227 entry->id = FLOW_ACTION_REDIRECT;
3228 entry->dev = tcf_mirred_dev(act);
3229 } else if (is_tcf_mirred_egress_mirror(act)) {
3230 entry->id = FLOW_ACTION_MIRRED;
3231 entry->dev = tcf_mirred_dev(act);
John Hurley48e584a2019-08-04 16:09:06 +01003232 } else if (is_tcf_mirred_ingress_redirect(act)) {
3233 entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3234 entry->dev = tcf_mirred_dev(act);
3235 } else if (is_tcf_mirred_ingress_mirror(act)) {
3236 entry->id = FLOW_ACTION_MIRRED_INGRESS;
3237 entry->dev = tcf_mirred_dev(act);
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003238 } else if (is_tcf_vlan(act)) {
3239 switch (tcf_vlan_action(act)) {
3240 case TCA_VLAN_ACT_PUSH:
3241 entry->id = FLOW_ACTION_VLAN_PUSH;
3242 entry->vlan.vid = tcf_vlan_push_vid(act);
3243 entry->vlan.proto = tcf_vlan_push_proto(act);
3244 entry->vlan.prio = tcf_vlan_push_prio(act);
3245 break;
3246 case TCA_VLAN_ACT_POP:
3247 entry->id = FLOW_ACTION_VLAN_POP;
3248 break;
3249 case TCA_VLAN_ACT_MODIFY:
3250 entry->id = FLOW_ACTION_VLAN_MANGLE;
3251 entry->vlan.vid = tcf_vlan_push_vid(act);
3252 entry->vlan.proto = tcf_vlan_push_proto(act);
3253 entry->vlan.prio = tcf_vlan_push_prio(act);
3254 break;
3255 default:
3256 goto err_out;
3257 }
3258 } else if (is_tcf_tunnel_set(act)) {
3259 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3260 entry->tunnel = tcf_tunnel_info(act);
3261 } else if (is_tcf_tunnel_release(act)) {
3262 entry->id = FLOW_ACTION_TUNNEL_DECAP;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003263 } else if (is_tcf_pedit(act)) {
3264 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3265 switch (tcf_pedit_cmd(act, k)) {
3266 case TCA_PEDIT_KEY_EX_CMD_SET:
3267 entry->id = FLOW_ACTION_MANGLE;
3268 break;
3269 case TCA_PEDIT_KEY_EX_CMD_ADD:
3270 entry->id = FLOW_ACTION_ADD;
3271 break;
3272 default:
3273 goto err_out;
3274 }
3275 entry->mangle.htype = tcf_pedit_htype(act, k);
3276 entry->mangle.mask = tcf_pedit_mask(act, k);
3277 entry->mangle.val = tcf_pedit_val(act, k);
3278 entry->mangle.offset = tcf_pedit_offset(act, k);
3279 entry = &flow_action->entries[++j];
3280 }
3281 } else if (is_tcf_csum(act)) {
3282 entry->id = FLOW_ACTION_CSUM;
3283 entry->csum_flags = tcf_csum_update_flags(act);
3284 } else if (is_tcf_skbedit_mark(act)) {
3285 entry->id = FLOW_ACTION_MARK;
3286 entry->mark = tcf_skbedit_mark(act);
Pieter Jansen van Vuurena7a7be62019-05-04 04:46:16 -07003287 } else if (is_tcf_sample(act)) {
3288 entry->id = FLOW_ACTION_SAMPLE;
3289 entry->sample.psample_group =
3290 tcf_sample_psample_group(act);
3291 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3292 entry->sample.truncate = tcf_sample_truncate(act);
3293 entry->sample.rate = tcf_sample_rate(act);
Pieter Jansen van Vuuren8c8cfc62019-05-04 04:46:22 -07003294 } else if (is_tcf_police(act)) {
3295 entry->id = FLOW_ACTION_POLICE;
3296 entry->police.burst = tcf_police_tcfp_burst(act);
3297 entry->police.rate_bytes_ps =
3298 tcf_police_rate_bytes_ps(act);
Paul Blakeyb57dc7c2019-07-09 10:30:48 +03003299 } else if (is_tcf_ct(act)) {
3300 entry->id = FLOW_ACTION_CT;
3301 entry->ct.action = tcf_ct_action(act);
3302 entry->ct.zone = tcf_ct_zone(act);
John Hurley6749d5902019-07-23 15:33:59 +01003303 } else if (is_tcf_mpls(act)) {
3304 switch (tcf_mpls_action(act)) {
3305 case TCA_MPLS_ACT_PUSH:
3306 entry->id = FLOW_ACTION_MPLS_PUSH;
3307 entry->mpls_push.proto = tcf_mpls_proto(act);
3308 entry->mpls_push.label = tcf_mpls_label(act);
3309 entry->mpls_push.tc = tcf_mpls_tc(act);
3310 entry->mpls_push.bos = tcf_mpls_bos(act);
3311 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3312 break;
3313 case TCA_MPLS_ACT_POP:
3314 entry->id = FLOW_ACTION_MPLS_POP;
3315 entry->mpls_pop.proto = tcf_mpls_proto(act);
3316 break;
3317 case TCA_MPLS_ACT_MODIFY:
3318 entry->id = FLOW_ACTION_MPLS_MANGLE;
3319 entry->mpls_mangle.label = tcf_mpls_label(act);
3320 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3321 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3322 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3323 break;
3324 default:
3325 goto err_out;
3326 }
John Hurleyfb1b7752019-08-04 16:09:04 +01003327 } else if (is_tcf_skbedit_ptype(act)) {
3328 entry->id = FLOW_ACTION_PTYPE;
3329 entry->ptype = tcf_skbedit_ptype(act);
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003330 } else {
3331 goto err_out;
3332 }
3333
3334 if (!is_tcf_pedit(act))
3335 j++;
3336 }
3337 return 0;
3338err_out:
3339 return -EOPNOTSUPP;
3340}
3341EXPORT_SYMBOL(tc_setup_flow_action);
3342
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01003343unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3344{
3345 unsigned int num_acts = 0;
3346 struct tc_action *act;
3347 int i;
3348
3349 tcf_exts_for_each_action(i, act, exts) {
3350 if (is_tcf_pedit(act))
3351 num_acts += tcf_pedit_nkeys(act);
3352 else
3353 num_acts++;
3354 }
3355 return num_acts;
3356}
3357EXPORT_SYMBOL(tcf_exts_num_actions);
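
/* Hedged usage sketch: callers size the flow_rule with tcf_exts_num_actions()
 * (pedit expands to one entry per key) and then translate the actions with
 * tc_setup_flow_action() before passing the rule to the offload callbacks;
 * drivers typically walk the resulting entries with flow_action_for_each()
 * from <net/flow_offload.h>. "cls_flower" and "exts" are assumptions here:
 *
 *	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!cls_flower.rule)
 *		return -ENOMEM;
 *
 *	err = tc_setup_flow_action(&cls_flower.rule->action, exts);
 *	if (err)
 *		goto errout;
 */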
3358
Jiri Pirko48617382018-01-17 11:46:46 +01003359static __net_init int tcf_net_init(struct net *net)
3360{
3361 struct tcf_net *tn = net_generic(net, tcf_net_id);
3362
Vlad Buslovab281622018-09-24 19:22:56 +03003363 spin_lock_init(&tn->idr_lock);
Jiri Pirko48617382018-01-17 11:46:46 +01003364 idr_init(&tn->idr);
3365 return 0;
3366}
3367
3368static void __net_exit tcf_net_exit(struct net *net)
3369{
3370 struct tcf_net *tn = net_generic(net, tcf_net_id);
3371
3372 idr_destroy(&tn->idr);
3373}
3374
3375static struct pernet_operations tcf_net_ops = {
3376 .init = tcf_net_init,
3377 .exit = tcf_net_exit,
3378 .id = &tcf_net_id,
3379 .size = sizeof(struct tcf_net),
3380};
3381
wenxu1150ab02019-08-07 09:13:53 +08003382static struct flow_indr_block_ing_entry block_ing_entry = {
3383 .cb = tc_indr_block_get_and_ing_cmd,
3384 .list = LIST_HEAD_INIT(block_ing_entry.list),
3385};
3386
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387static int __init tc_filter_init(void)
3388{
Jiri Pirko48617382018-01-17 11:46:46 +01003389 int err;
3390
Cong Wang7aa00452017-10-26 18:24:28 -07003391 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3392 if (!tc_filter_wq)
3393 return -ENOMEM;
3394
Jiri Pirko48617382018-01-17 11:46:46 +01003395 err = register_pernet_subsys(&tcf_net_ops);
3396 if (err)
3397 goto err_register_pernet_subsys;
3398
wenxu1150ab02019-08-07 09:13:53 +08003399 flow_indr_add_block_ing_cb(&block_ing_entry);
3400
Vlad Buslov470502d2019-02-11 10:55:48 +02003401 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3402 RTNL_FLAG_DOIT_UNLOCKED);
3403 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3404 RTNL_FLAG_DOIT_UNLOCKED);
Vlad Buslovc431f892018-05-31 09:52:53 +03003405 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
Vlad Buslov470502d2019-02-11 10:55:48 +02003406 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02003407 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3408 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3409 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3410 tc_dump_chain, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 return 0;
Jiri Pirko48617382018-01-17 11:46:46 +01003413
3414err_register_pernet_subsys:
3415 destroy_workqueue(tc_filter_wq);
3416 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417}
3418
3419subsys_initcall(tc_filter_init);