// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

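/* Returns true only if the classifier type registered its ops with the
 * TCF_PROTO_OPS_DOIT_UNLOCKED flag, i.e. it can run without rtnl lock.
 * On lookup failure we conservatively report false so callers fall back
 * to taking rtnl.
 */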
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

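/* tcf_proto instances are reference counted: tcf_proto_get() takes a
 * reference, tcf_proto_put() drops one and destroys the proto (and its
 * chain reference) once the count reaches zero.
 */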
static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	if (tp->ops->walk) {
		tp->ops->walk(tp, &walker, rtnl_held);
		return !walker.nonempty;
	}
	return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)		\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

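/* Allocate a new chain and link it into the block's chain list.
 * Caller must hold block->lock; chain index 0 is also cached as
 * block->chain0.chain.
 */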
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

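/* Look up a chain by index and take a reference to it, optionally
 * creating it if it does not exist. When by_act is true the reference
 * is accounted as an action reference, which keeps the chain hidden
 * from regular chain dumps until a non-action user appears.
 */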
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

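/* Detach every classifier proto from the chain under filter_chain_lock,
 * mark the chain as flushing, and then drop the proto references outside
 * of the lock.
 */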
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct net_device *dev,
				  struct tcf_block *block,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv,
				  enum flow_block_command command)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
					  flow_indr_block_bind_cb_t *cb,
					  void *cb_priv,
					  enum flow_block_command command)
{
	struct tcf_block *block = tc_dev_ingress_block(dev);

	tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

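/* Bind the block to the device for hardware offload. If the device has
 * no ndo_setup_tc (or it returns -EOPNOTSUPP) the block is only counted
 * in nooffloaddevcnt; the bind is rejected instead when the block
 * already has offloaded filters.
 */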
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

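/* Return the chain following 'chain' on the block (or the first chain
 * when 'chain' is NULL), skipping chains that are only referenced by
 * actions. A reference to the returned chain is taken under block->lock.
 */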
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

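/* Flush and release every chain on the block. Only called once the last
 * block reference is being dropped, so chains can no longer be added
 * concurrently.
 */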
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

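/* One entry per Qdisc bound to the block, remembering the binder type so
 * the binding can be matched again on unbind and for keep_dst handling.
 */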
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

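/* Get (and if needed create) the block described by 'ei' and bind the
 * Qdisc to it: register the owner, install the chain0 head-change
 * callback and set up hardware offload.
 */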
1264
Jiri Pirko48617382018-01-17 11:46:46 +01001265int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1266 struct tcf_block_ext_info *ei,
1267 struct netlink_ext_ack *extack)
1268{
1269 struct net *net = qdisc_net(q);
1270 struct tcf_block *block = NULL;
Jiri Pirko48617382018-01-17 11:46:46 +01001271 int err;
1272
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001273 if (ei->block_index)
Jiri Pirko48617382018-01-17 11:46:46 +01001274 /* block_index not 0 means the shared block is requested */
Vlad Buslov787ce6d2018-09-24 19:22:58 +03001275 block = tcf_block_refcnt_get(net, ei->block_index);
Jiri Pirko48617382018-01-17 11:46:46 +01001276
1277 if (!block) {
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001278 block = tcf_block_create(net, q, ei->block_index, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001279 if (IS_ERR(block))
1280 return PTR_ERR(block);
Jiri Pirkobb047dd2018-02-13 12:00:16 +01001281 if (tcf_block_shared(block)) {
1282 err = tcf_block_insert(block, net, extack);
Jiri Pirko48617382018-01-17 11:46:46 +01001283 if (err)
1284 goto err_block_insert;
1285 }
1286 }
1287
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001288 err = tcf_block_owner_add(block, q, ei->binder_type);
1289 if (err)
1290 goto err_block_owner_add;
1291
1292 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1293
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001294 err = tcf_chain0_head_change_cb_add(block, ei, extack);
Jiri Pirkoa9b19442018-01-17 11:46:45 +01001295 if (err)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001296 goto err_chain0_head_change_cb_add;
Jiri Pirkocaa72602018-01-17 11:46:50 +01001297
John Hurley60513bd2018-06-25 14:30:04 -07001298 err = tcf_block_offload_bind(block, q, ei, extack);
Jiri Pirkocaa72602018-01-17 11:46:50 +01001299 if (err)
1300 goto err_block_offload_bind;
1301
Jiri Pirko6529eab2017-05-17 11:07:55 +02001302 *p_block = block;
1303 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001304
Jiri Pirkocaa72602018-01-17 11:46:50 +01001305err_block_offload_bind:
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001306 tcf_chain0_head_change_cb_del(block, ei);
1307err_chain0_head_change_cb_add:
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001308 tcf_block_owner_del(block, q, ei->binder_type);
1309err_block_owner_add:
Jiri Pirko48617382018-01-17 11:46:46 +01001310err_block_insert:
Vlad Buslov12db03b2019-02-11 10:55:45 +02001311 tcf_block_refcnt_put(block, true);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001312 return err;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001313}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001314EXPORT_SYMBOL(tcf_block_get_ext);
1315
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001316static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1317{
1318 struct tcf_proto __rcu **p_filter_chain = priv;
1319
1320 rcu_assign_pointer(*p_filter_chain, tp_head);
1321}
1322
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001323int tcf_block_get(struct tcf_block **p_block,
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001324 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1325 struct netlink_ext_ack *extack)
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001326{
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001327 struct tcf_block_ext_info ei = {
1328 .chain_head_change = tcf_chain_head_change_dflt,
1329 .chain_head_change_priv = p_filter_chain,
1330 };
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001331
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001332 WARN_ON(!p_filter_chain);
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001333 return tcf_block_get_ext(p_block, q, &ei, extack);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001334}
Jiri Pirko6529eab2017-05-17 11:07:55 +02001335EXPORT_SYMBOL(tcf_block_get);
1336
Cong Wang7aa00452017-10-26 18:24:28 -07001337/* XXX: Standalone actions are not allowed to jump to any chain, and bound
Roman Kapla60b3f52017-11-24 12:27:58 +01001338 * actions should all be removed after flushing.
Cong Wang7aa00452017-10-26 18:24:28 -07001339 */
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001340void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
David S. Millere1ea2f92017-10-30 14:10:01 +09001341 struct tcf_block_ext_info *ei)
Cong Wang7aa00452017-10-26 18:24:28 -07001342{
David S. Millerc30abd52017-12-16 22:11:55 -05001343 if (!block)
1344 return;
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001345 tcf_chain0_head_change_cb_del(block, ei);
Jiri Pirkof36fe1c2018-01-17 11:46:48 +01001346 tcf_block_owner_del(block, q, ei->binder_type);
Roman Kapla60b3f52017-11-24 12:27:58 +01001347
Vlad Buslov12db03b2019-02-11 10:55:45 +02001348 __tcf_block_put(block, q, ei, true);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001349}
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001350EXPORT_SYMBOL(tcf_block_put_ext);
1351
1352void tcf_block_put(struct tcf_block *block)
1353{
1354 struct tcf_block_ext_info ei = {0, };
1355
Jiri Pirko4853f122017-12-21 13:13:59 +01001356 if (!block)
1357 return;
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001358 tcf_block_put_ext(block, block->q, &ei);
Jiri Pirko8c4083b2017-10-19 15:50:29 +02001359}
David S. Millere1ea2f92017-10-30 14:10:01 +09001360
Jiri Pirko6529eab2017-05-17 11:07:55 +02001361EXPORT_SYMBOL(tcf_block_put);
Jiri Pirkocf1facd2017-02-09 14:38:56 +01001362
John Hurley32636742018-06-25 14:30:10 -07001363static int
Pablo Neira Ayusoa7323312019-07-19 18:20:15 +02001364tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
John Hurley32636742018-06-25 14:30:10 -07001365 void *cb_priv, bool add, bool offload_in_use,
1366 struct netlink_ext_ack *extack)
1367{
Vlad Buslovbbf73832019-02-11 10:55:36 +02001368 struct tcf_chain *chain, *chain_prev;
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001369 struct tcf_proto *tp, *tp_prev;
John Hurley32636742018-06-25 14:30:10 -07001370 int err;
1371
Vlad Buslov4f8116c2019-08-26 16:44:57 +03001372 lockdep_assert_held(&block->cb_lock);
1373
Vlad Buslovbbf73832019-02-11 10:55:36 +02001374 for (chain = __tcf_get_next_chain(block, NULL);
1375 chain;
1376 chain_prev = chain,
1377 chain = __tcf_get_next_chain(block, chain),
1378 tcf_chain_put(chain_prev)) {
Vlad Buslovfe2923a2019-02-11 10:55:40 +02001379 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1380 tp_prev = tp,
1381 tp = __tcf_get_next_proto(chain, tp),
Vlad Buslov12db03b2019-02-11 10:55:45 +02001382 tcf_proto_put(tp_prev, true, NULL)) {
John Hurley32636742018-06-25 14:30:10 -07001383 if (tp->ops->reoffload) {
1384 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1385 extack);
1386 if (err && add)
1387 goto err_playback_remove;
1388 } else if (add && offload_in_use) {
1389 err = -EOPNOTSUPP;
1390 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1391 goto err_playback_remove;
1392 }
1393 }
1394 }
1395
1396 return 0;
1397
1398err_playback_remove:
Vlad Buslov12db03b2019-02-11 10:55:45 +02001399 tcf_proto_put(tp, true, NULL);
Vlad Buslovbbf73832019-02-11 10:55:36 +02001400 tcf_chain_put(chain);
John Hurley32636742018-06-25 14:30:10 -07001401 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1402 extack);
1403 return err;
1404}
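/* Note on the replay above: it is what allows a callback registered after
 * filters already exist (or unregistered while they still exist) to receive
 * the complete rule set, by asking each classifier's ->reoffload() to re-issue
 * its hardware rules for just the callback being added or removed.
 */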
1405
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001406static int tcf_block_bind(struct tcf_block *block,
1407 struct flow_block_offload *bo)
1408{
1409 struct flow_block_cb *block_cb, *next;
1410 int err, i = 0;
1411
Vlad Buslov4f8116c2019-08-26 16:44:57 +03001412 lockdep_assert_held(&block->cb_lock);
1413
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001414 list_for_each_entry(block_cb, &bo->cb_list, list) {
1415 err = tcf_block_playback_offloads(block, block_cb->cb,
1416 block_cb->cb_priv, true,
1417 tcf_block_offload_in_use(block),
1418 bo->extack);
1419 if (err)
1420 goto err_unroll;
Vlad Buslovc9f14472019-08-26 16:45:01 +03001421 if (!bo->unlocked_driver_cb)
1422 block->lockeddevcnt++;
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001423
1424 i++;
1425 }
Pablo Neira Ayuso14bfb132019-07-19 18:20:16 +02001426 list_splice(&bo->cb_list, &block->flow_block.cb_list);
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001427
1428 return 0;
1429
1430err_unroll:
1431 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1432 if (i-- > 0) {
1433 list_del(&block_cb->list);
1434 tcf_block_playback_offloads(block, block_cb->cb,
1435 block_cb->cb_priv, false,
1436 tcf_block_offload_in_use(block),
1437 NULL);
Vlad Buslovc9f14472019-08-26 16:45:01 +03001438 if (!bo->unlocked_driver_cb)
1439 block->lockeddevcnt--;
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001440 }
1441 flow_block_cb_free(block_cb);
1442 }
1443
1444 return err;
1445}
1446
1447static void tcf_block_unbind(struct tcf_block *block,
1448 struct flow_block_offload *bo)
1449{
1450 struct flow_block_cb *block_cb, *next;
1451
Vlad Buslov4f8116c2019-08-26 16:44:57 +03001452 lockdep_assert_held(&block->cb_lock);
1453
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001454 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1455 tcf_block_playback_offloads(block, block_cb->cb,
1456 block_cb->cb_priv, false,
1457 tcf_block_offload_in_use(block),
1458 NULL);
1459 list_del(&block_cb->list);
1460 flow_block_cb_free(block_cb);
Vlad Buslovc9f14472019-08-26 16:45:01 +03001461 if (!bo->unlocked_driver_cb)
1462 block->lockeddevcnt--;
Pablo Neira Ayuso59094b12019-07-09 22:55:45 +02001463 }
1464}
1465
1466static int tcf_block_setup(struct tcf_block *block,
1467 struct flow_block_offload *bo)
1468{
1469 int err;
1470
1471 switch (bo->command) {
1472 case FLOW_BLOCK_BIND:
1473 err = tcf_block_bind(block, bo);
1474 break;
1475 case FLOW_BLOCK_UNBIND:
1476 err = 0;
1477 tcf_block_unbind(block, bo);
1478 break;
1479 default:
1480 WARN_ON_ONCE(1);
1481 err = -EOPNOTSUPP;
1482 }
1483
1484 return err;
1485}
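/* Driver-side sketch (illustrative assumption, not code from this file): the
 * FLOW_BLOCK_BIND/UNBIND commands handled above are answered when a driver's
 * ndo_setup_tc(TC_SETUP_BLOCK) callback populates bo->cb_list, typically along
 * the lines of:
 *
 *	case FLOW_BLOCK_BIND:
 *		block_cb = flow_block_cb_alloc(my_setup_cb, dev, dev, NULL);
 *		if (IS_ERR(block_cb))
 *			return PTR_ERR(block_cb);
 *		flow_block_cb_add(block_cb, f);
 *		return 0;
 *	case FLOW_BLOCK_UNBIND:
 *		block_cb = flow_block_cb_lookup(f->block, my_setup_cb, dev);
 *		if (!block_cb)
 *			return -ENOENT;
 *		flow_block_cb_remove(block_cb, f);
 *		return 0;
 *
 * where my_setup_cb and the dev cookie are placeholders for the driver's own
 * callback and identity.
 */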
1486
Jiri Pirko87d83092017-05-17 11:07:54 +02001487/* Main classifier routine: scans classifier chain attached
1488 * to this qdisc, (optionally) tests for protocol and asks
1489 * specific classifiers.
1490 */
1491int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1492 struct tcf_result *res, bool compat_mode)
1493{
Jiri Pirko87d83092017-05-17 11:07:54 +02001494#ifdef CONFIG_NET_CLS_ACT
1495 const int max_reclassify_loop = 4;
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001496 const struct tcf_proto *orig_tp = tp;
1497 const struct tcf_proto *first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001498 int limit = 0;
1499
1500reclassify:
1501#endif
1502 for (; tp; tp = rcu_dereference_bh(tp->next)) {
Cong Wangcd0c4e72019-01-11 18:55:42 -08001503 __be16 protocol = tc_skb_protocol(skb);
Jiri Pirko87d83092017-05-17 11:07:54 +02001504 int err;
1505
1506 if (tp->protocol != protocol &&
1507 tp->protocol != htons(ETH_P_ALL))
1508 continue;
1509
1510 err = tp->classify(skb, tp, res);
1511#ifdef CONFIG_NET_CLS_ACT
Jiri Pirkodb505142017-05-17 11:08:03 +02001512 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001513 first_tp = orig_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001514 goto reset;
Jiri Pirkodb505142017-05-17 11:08:03 +02001515 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001516 first_tp = res->goto_tp;
Paul Blakey95a72332019-09-04 16:56:37 +03001517
1518#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1519 {
1520 struct tc_skb_ext *ext;
1521
1522 ext = skb_ext_add(skb, TC_SKB_EXT);
1523 if (WARN_ON_ONCE(!ext))
1524 return TC_ACT_SHOT;
1525
1526 ext->chain = err & TC_ACT_EXT_VAL_MASK;
1527 }
1528#endif
Jiri Pirkodb505142017-05-17 11:08:03 +02001529 goto reset;
1530 }
Jiri Pirko87d83092017-05-17 11:07:54 +02001531#endif
1532 if (err >= 0)
1533 return err;
1534 }
1535
1536 return TC_ACT_UNSPEC; /* signal: continue lookup */
1537#ifdef CONFIG_NET_CLS_ACT
1538reset:
1539 if (unlikely(limit++ >= max_reclassify_loop)) {
Jiri Pirko9d3aaff2018-01-17 11:46:47 +01001540 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1541 tp->chain->block->index,
1542 tp->prio & 0xffff,
Jiri Pirko87d83092017-05-17 11:07:54 +02001543 ntohs(tp->protocol));
1544 return TC_ACT_SHOT;
1545 }
1546
Jiri Pirkoee538dc2017-05-23 09:11:59 +02001547 tp = first_tp;
Jiri Pirko87d83092017-05-17 11:07:54 +02001548 goto reclassify;
1549#endif
1550}
1551EXPORT_SYMBOL(tcf_classify);
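/* Caller sketch (illustrative, not code from this file): a classful qdisc's
 * classification path typically uses this as
 *
 *	fl = rcu_dereference_bh(q->filter_list);
 *	err = tcf_classify(skb, fl, &res, false);
 *	switch (err) {
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_TRAP:
 *		return NULL;	(skb was consumed by an action)
 *	case TC_ACT_SHOT:
 *		return NULL;	(skb should be dropped)
 *	}
 *
 * and on success res.classid selects the destination class.
 */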
1552
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001553struct tcf_chain_info {
1554 struct tcf_proto __rcu **pprev;
1555 struct tcf_proto __rcu *next;
1556};
1557
Vlad Busloved76f5e2019-02-11 10:55:38 +02001558static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1559 struct tcf_chain_info *chain_info)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001560{
Vlad Busloved76f5e2019-02-11 10:55:38 +02001561 return tcf_chain_dereference(*chain_info->pprev, chain);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001562}
1563
Vlad Buslov726d06122019-02-11 10:55:42 +02001564static int tcf_chain_tp_insert(struct tcf_chain *chain,
1565 struct tcf_chain_info *chain_info,
1566 struct tcf_proto *tp)
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001567{
Vlad Buslov726d06122019-02-11 10:55:42 +02001568 if (chain->flushing)
1569 return -EAGAIN;
1570
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001571 if (*chain_info->pprev == chain->filter_chain)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001572 tcf_chain0_head_change(chain, tp);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001573 tcf_proto_get(tp);
Vlad Busloved76f5e2019-02-11 10:55:38 +02001574 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001575 rcu_assign_pointer(*chain_info->pprev, tp);
Vlad Buslov726d06122019-02-11 10:55:42 +02001576
1577 return 0;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001578}
1579
1580static void tcf_chain_tp_remove(struct tcf_chain *chain,
1581 struct tcf_chain_info *chain_info,
1582 struct tcf_proto *tp)
1583{
Vlad Busloved76f5e2019-02-11 10:55:38 +02001584 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001585
Vlad Buslov8b646782019-02-11 10:55:41 +02001586 tcf_proto_mark_delete(tp);
Jiri Pirkoc7eb7d72017-11-03 11:46:24 +01001587 if (tp == chain->filter_chain)
Jiri Pirkof71e0ca42018-07-23 09:23:05 +02001588 tcf_chain0_head_change(chain, next);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001589 RCU_INIT_POINTER(*chain_info->pprev, next);
1590}
1591
1592static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1593 struct tcf_chain_info *chain_info,
1594 u32 protocol, u32 prio,
Vlad Buslov8b646782019-02-11 10:55:41 +02001595 bool prio_allocate);
1596
1597/* Try to insert a new proto.
1598 * If a proto with the specified priority already exists, free the new proto
1599 * and return the existing one.
1600 */
1601
1602static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1603 struct tcf_proto *tp_new,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001604 u32 protocol, u32 prio,
1605 bool rtnl_held)
Vlad Buslov8b646782019-02-11 10:55:41 +02001606{
1607 struct tcf_chain_info chain_info;
1608 struct tcf_proto *tp;
Vlad Buslov726d06122019-02-11 10:55:42 +02001609 int err = 0;
Vlad Buslov8b646782019-02-11 10:55:41 +02001610
1611 mutex_lock(&chain->filter_chain_lock);
1612
1613 tp = tcf_chain_tp_find(chain, &chain_info,
1614 protocol, prio, false);
1615 if (!tp)
Vlad Buslov726d06122019-02-11 10:55:42 +02001616 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
Vlad Buslov8b646782019-02-11 10:55:41 +02001617 mutex_unlock(&chain->filter_chain_lock);
1618
1619 if (tp) {
Vlad Buslov12db03b2019-02-11 10:55:45 +02001620 tcf_proto_destroy(tp_new, rtnl_held, NULL);
Vlad Buslov8b646782019-02-11 10:55:41 +02001621 tp_new = tp;
Vlad Buslov726d06122019-02-11 10:55:42 +02001622 } else if (err) {
Vlad Buslov12db03b2019-02-11 10:55:45 +02001623 tcf_proto_destroy(tp_new, rtnl_held, NULL);
Vlad Buslov726d06122019-02-11 10:55:42 +02001624 tp_new = ERR_PTR(err);
Vlad Buslov8b646782019-02-11 10:55:41 +02001625 }
1626
1627 return tp_new;
1628}
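/* Note on the helper above: filter_chain_lock is held only across the
 * lookup-and-insert, so two concurrent create requests racing on the same
 * priority may both allocate a proto; only one insertion wins, the loser's
 * instance is destroyed, and the surviving proto is returned to both callers.
 */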
1629
1630static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001631 struct tcf_proto *tp, bool rtnl_held,
Vlad Buslov8b646782019-02-11 10:55:41 +02001632 struct netlink_ext_ack *extack)
1633{
1634 struct tcf_chain_info chain_info;
1635 struct tcf_proto *tp_iter;
1636 struct tcf_proto **pprev;
1637 struct tcf_proto *next;
1638
1639 mutex_lock(&chain->filter_chain_lock);
1640
1641 /* Atomically find and remove tp from chain. */
1642 for (pprev = &chain->filter_chain;
1643 (tp_iter = tcf_chain_dereference(*pprev, chain));
1644 pprev = &tp_iter->next) {
1645 if (tp_iter == tp) {
1646 chain_info.pprev = pprev;
1647 chain_info.next = tp_iter->next;
1648 WARN_ON(tp_iter->deleting);
1649 break;
1650 }
1651 }
1652 /* Verify that tp still exists and no new filters were inserted
1653 * concurrently.
1654 * Mark tp for deletion if it is empty.
1655 */
Vlad Buslov12db03b2019-02-11 10:55:45 +02001656 if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
Vlad Buslov8b646782019-02-11 10:55:41 +02001657 mutex_unlock(&chain->filter_chain_lock);
1658 return;
1659 }
1660
1661 next = tcf_chain_dereference(chain_info.next, chain);
1662 if (tp == chain->filter_chain)
1663 tcf_chain0_head_change(chain, next);
1664 RCU_INIT_POINTER(*chain_info.pprev, next);
1665 mutex_unlock(&chain->filter_chain_lock);
1666
Vlad Buslov12db03b2019-02-11 10:55:45 +02001667 tcf_proto_put(tp, rtnl_held, extack);
Vlad Buslov8b646782019-02-11 10:55:41 +02001668}
1669
1670static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1671 struct tcf_chain_info *chain_info,
1672 u32 protocol, u32 prio,
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001673 bool prio_allocate)
1674{
1675 struct tcf_proto **pprev;
1676 struct tcf_proto *tp;
1677
1678 /* Check the chain for existence of proto-tcf with this priority */
1679 for (pprev = &chain->filter_chain;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001680 (tp = tcf_chain_dereference(*pprev, chain));
1681 pprev = &tp->next) {
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001682 if (tp->prio >= prio) {
1683 if (tp->prio == prio) {
1684 if (prio_allocate ||
1685 (tp->protocol != protocol && protocol))
1686 return ERR_PTR(-EINVAL);
1687 } else {
1688 tp = NULL;
1689 }
1690 break;
1691 }
1692 }
1693 chain_info->pprev = pprev;
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001694 if (tp) {
1695 chain_info->next = tp->next;
1696 tcf_proto_get(tp);
1697 } else {
1698 chain_info->next = NULL;
1699 }
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001700 return tp;
1701}
1702
WANG Cong71203712017-08-07 15:26:50 -07001703static int tcf_fill_node(struct net *net, struct sk_buff *skb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001704 struct tcf_proto *tp, struct tcf_block *block,
1705 struct Qdisc *q, u32 parent, void *fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001706 u32 portid, u32 seq, u16 flags, int event,
1707 bool rtnl_held)
WANG Cong71203712017-08-07 15:26:50 -07001708{
1709 struct tcmsg *tcm;
1710 struct nlmsghdr *nlh;
1711 unsigned char *b = skb_tail_pointer(skb);
1712
1713 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1714 if (!nlh)
1715 goto out_nlmsg_trim;
1716 tcm = nlmsg_data(nlh);
1717 tcm->tcm_family = AF_UNSPEC;
1718 tcm->tcm__pad1 = 0;
1719 tcm->tcm__pad2 = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001720 if (q) {
1721 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1722 tcm->tcm_parent = parent;
1723 } else {
1724 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1725 tcm->tcm_block_index = block->index;
1726 }
WANG Cong71203712017-08-07 15:26:50 -07001727 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1728 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1729 goto nla_put_failure;
1730 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1731 goto nla_put_failure;
1732 if (!fh) {
1733 tcm->tcm_handle = 0;
1734 } else {
Vlad Buslov12db03b2019-02-11 10:55:45 +02001735 if (tp->ops->dump &&
1736 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
WANG Cong71203712017-08-07 15:26:50 -07001737 goto nla_put_failure;
1738 }
1739 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1740 return skb->len;
1741
1742out_nlmsg_trim:
1743nla_put_failure:
1744 nlmsg_trim(skb, b);
1745 return -1;
1746}
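/* The message built above follows the usual tc filter layout: an nlmsghdr, a
 * struct tcmsg whose tcm_info packs the filter priority and protocol, a
 * TCA_KIND string, a TCA_CHAIN index and, when a specific filter handle is
 * present, whatever attributes the classifier's ->dump() callback appends.
 */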
1747
1748static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1749 struct nlmsghdr *n, struct tcf_proto *tp,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001750 struct tcf_block *block, struct Qdisc *q,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001751 u32 parent, void *fh, int event, bool unicast,
1752 bool rtnl_held)
WANG Cong71203712017-08-07 15:26:50 -07001753{
1754 struct sk_buff *skb;
1755 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001756 int err = 0;
WANG Cong71203712017-08-07 15:26:50 -07001757
1758 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1759 if (!skb)
1760 return -ENOBUFS;
1761
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001762 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001763 n->nlmsg_seq, n->nlmsg_flags, event,
1764 rtnl_held) <= 0) {
WANG Cong71203712017-08-07 15:26:50 -07001765 kfree_skb(skb);
1766 return -EINVAL;
1767 }
1768
1769 if (unicast)
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001770 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1771 else
1772 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1773 n->nlmsg_flags & NLM_F_ECHO);
WANG Cong71203712017-08-07 15:26:50 -07001774
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001775 if (err > 0)
1776 err = 0;
1777 return err;
WANG Cong71203712017-08-07 15:26:50 -07001778}
1779
1780static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1781 struct nlmsghdr *n, struct tcf_proto *tp,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001782 struct tcf_block *block, struct Qdisc *q,
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001783 u32 parent, void *fh, bool unicast, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001784 bool rtnl_held, struct netlink_ext_ack *extack)
WANG Cong71203712017-08-07 15:26:50 -07001785{
1786 struct sk_buff *skb;
1787 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1788 int err;
1789
1790 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1791 if (!skb)
1792 return -ENOBUFS;
1793
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001794 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001795 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1796 rtnl_held) <= 0) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001797 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
WANG Cong71203712017-08-07 15:26:50 -07001798 kfree_skb(skb);
1799 return -EINVAL;
1800 }
1801
Vlad Buslov12db03b2019-02-11 10:55:45 +02001802 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
WANG Cong71203712017-08-07 15:26:50 -07001803 if (err) {
1804 kfree_skb(skb);
1805 return err;
1806 }
1807
1808 if (unicast)
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001809 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1810 else
1811 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1812 n->nlmsg_flags & NLM_F_ECHO);
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001813 if (err < 0)
1814 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
Zhike Wang5b5f99b2019-03-11 03:15:54 -07001815
1816 if (err > 0)
1817 err = 0;
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001818 return err;
WANG Cong71203712017-08-07 15:26:50 -07001819}
1820
1821static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001822 struct tcf_block *block, struct Qdisc *q,
1823 u32 parent, struct nlmsghdr *n,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001824 struct tcf_chain *chain, int event,
1825 bool rtnl_held)
WANG Cong71203712017-08-07 15:26:50 -07001826{
1827 struct tcf_proto *tp;
1828
Vlad Buslov12db03b2019-02-11 10:55:45 +02001829 for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1830 tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001831 tfilter_notify(net, oskb, n, tp, block,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001832 q, parent, NULL, event, false, rtnl_held);
WANG Cong71203712017-08-07 15:26:50 -07001833}
1834
Vlad Buslov7d5509f2019-02-11 10:55:44 +02001835static void tfilter_put(struct tcf_proto *tp, void *fh)
1836{
1837 if (tp->ops->put && fh)
1838 tp->ops->put(tp, fh);
1839}
1840
Vlad Buslovc431f892018-05-31 09:52:53 +03001841static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
David Ahernc21ef3e2017-04-16 09:48:24 -07001842 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001844 struct net *net = sock_net(skb->sk);
Patrick McHardyadd93b62008-01-22 22:11:33 -08001845 struct nlattr *tca[TCA_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 struct tcmsg *t;
1847 u32 protocol;
1848 u32 prio;
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001849 bool prio_allocate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001851 u32 chain_index;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01001852 struct Qdisc *q = NULL;
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001853 struct tcf_chain_info chain_info;
Jiri Pirko5bc17012017-05-17 11:08:01 +02001854 struct tcf_chain *chain = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02001855 struct tcf_block *block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 struct tcf_proto *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 unsigned long cl;
WANG Cong8113c092017-08-04 21:31:43 -07001858 void *fh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 int err;
Daniel Borkmann628185c2016-12-21 18:04:11 +01001860 int tp_created;
Vlad Buslov470502d2019-02-11 10:55:48 +02001861 bool rtnl_held = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
Vlad Buslovc431f892018-05-31 09:52:53 +03001863 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
Eric W. Biedermandfc47ef2012-11-16 03:03:00 +00001864 return -EPERM;
Hong zhi guode179c82013-03-25 17:36:33 +00001865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866replay:
Daniel Borkmann628185c2016-12-21 18:04:11 +01001867 tp_created = 0;
1868
Johannes Berg8cb08172019-04-26 14:07:28 +02001869 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1870 rtm_tca_policy, extack);
Hong zhi guode179c82013-03-25 17:36:33 +00001871 if (err < 0)
1872 return err;
1873
David S. Miller942b8162012-06-26 21:48:50 -07001874 t = nlmsg_data(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 protocol = TC_H_MIN(t->tcm_info);
1876 prio = TC_H_MAJ(t->tcm_info);
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001877 prio_allocate = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 parent = t->tcm_parent;
Vlad Buslov4dbfa762019-02-11 10:55:39 +02001879 tp = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 cl = 0;
Vlad Buslov470502d2019-02-11 10:55:48 +02001881 block = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
1883 if (prio == 0) {
Vlad Buslovc431f892018-05-31 09:52:53 +03001884 /* If no priority is provided by the user,
1885 * we allocate one.
1886 */
1887 if (n->nlmsg_flags & NLM_F_CREATE) {
1888 prio = TC_H_MAKE(0x80000000U, 0U);
1889 prio_allocate = true;
1890 } else {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001891 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 return -ENOENT;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001893 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 }
1895
1896 /* Find head of filter chain. */
1897
Vlad Buslov470502d2019-02-11 10:55:48 +02001898 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1899 if (err)
1900 return err;
1901
1902 /* Take the rtnl mutex if rtnl_held was set on a previous iteration, if the
1903 * block is shared (no qdisc found), if the qdisc is not unlocked, if the
1904 * classifier type is not specified, or if the classifier is not unlocked.
1905 */
1906 if (rtnl_held ||
1907 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
1908 !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
1909 rtnl_held = true;
1910 rtnl_lock();
1911 }
1912
1913 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
1914 if (err)
1915 goto errout;
1916
1917 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
1918 extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03001919 if (IS_ERR(block)) {
1920 err = PTR_ERR(block);
1921 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001922 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02001923
1924 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1925 if (chain_index > TC_ACT_EXT_VAL_MASK) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001926 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Jiri Pirko5bc17012017-05-17 11:08:01 +02001927 err = -EINVAL;
1928 goto errout;
1929 }
Vlad Buslovc431f892018-05-31 09:52:53 +03001930 chain = tcf_chain_get(block, chain_index, true);
Jiri Pirko5bc17012017-05-17 11:08:01 +02001931 if (!chain) {
Jiri Pirkod5ed72a2018-08-27 20:58:43 +02001932 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
Vlad Buslovc431f892018-05-31 09:52:53 +03001933 err = -ENOMEM;
Daniel Borkmannea7f8272016-06-10 23:10:22 +02001934 goto errout;
1935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Vlad Busloved76f5e2019-02-11 10:55:38 +02001937 mutex_lock(&chain->filter_chain_lock);
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001938 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1939 prio, prio_allocate);
1940 if (IS_ERR(tp)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001941 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Jiri Pirko2190d1d2017-05-17 11:07:59 +02001942 err = PTR_ERR(tp);
Vlad Busloved76f5e2019-02-11 10:55:38 +02001943 goto errout_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 }
1945
1946 if (tp == NULL) {
Vlad Buslov8b646782019-02-11 10:55:41 +02001947 struct tcf_proto *tp_new = NULL;
1948
Vlad Buslov726d06122019-02-11 10:55:42 +02001949 if (chain->flushing) {
1950 err = -EAGAIN;
1951 goto errout_locked;
1952 }
1953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 /* Proto-tcf does not exist, create new one */
1955
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001956 if (tca[TCA_KIND] == NULL || !protocol) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001957 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001958 err = -EINVAL;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001959 goto errout_locked;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001960 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
Vlad Buslovc431f892018-05-31 09:52:53 +03001962 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05001963 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001964 err = -ENOENT;
Vlad Busloved76f5e2019-02-11 10:55:38 +02001965 goto errout_locked;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Jiri Pirko9d36d9e2017-05-17 11:07:57 +02001968 if (prio_allocate)
Vlad Busloved76f5e2019-02-11 10:55:38 +02001969 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
1970 &chain_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Vlad Busloved76f5e2019-02-11 10:55:38 +02001972 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslov8b646782019-02-11 10:55:41 +02001973 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
Vlad Buslov12db03b2019-02-11 10:55:45 +02001974 protocol, prio, chain, rtnl_held,
1975 extack);
Vlad Buslov8b646782019-02-11 10:55:41 +02001976 if (IS_ERR(tp_new)) {
1977 err = PTR_ERR(tp_new);
Vlad Buslov726d06122019-02-11 10:55:42 +02001978 goto errout_tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02001980
Minoru Usui12186be2009-06-02 02:17:34 -07001981 tp_created = 1;
Vlad Buslov12db03b2019-02-11 10:55:45 +02001982 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
1983 rtnl_held);
Vlad Buslov726d06122019-02-11 10:55:42 +02001984 if (IS_ERR(tp)) {
1985 err = PTR_ERR(tp);
1986 goto errout_tp;
1987 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02001988 } else {
1989 mutex_unlock(&chain->filter_chain_lock);
Jiri Pirko6bb16e72017-02-09 14:38:58 +01001990 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991
Vlad Buslov8b646782019-02-11 10:55:41 +02001992 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1993 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1994 err = -EINVAL;
1995 goto errout;
1996 }
1997
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 fh = tp->ops->get(tp, t->tcm_handle);
1999
WANG Cong8113c092017-08-04 21:31:43 -07002000 if (!fh) {
Vlad Buslovc431f892018-05-31 09:52:53 +03002001 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
Alexander Aringc35a4ac2018-01-18 11:20:50 -05002002 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
Jiri Pirko6bb16e72017-02-09 14:38:58 +01002003 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 goto errout;
Jiri Pirko6bb16e72017-02-09 14:38:58 +01002005 }
Vlad Buslovc431f892018-05-31 09:52:53 +03002006 } else if (n->nlmsg_flags & NLM_F_EXCL) {
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002007 tfilter_put(tp, fh);
Vlad Buslovc431f892018-05-31 09:52:53 +03002008 NL_SET_ERR_MSG(extack, "Filter already exists");
2009 err = -EEXIST;
2010 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 }
2012
Jiri Pirko9f407f12018-07-23 09:23:07 +02002013 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2014 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2015 err = -EINVAL;
2016 goto errout;
2017 }
2018
Cong Wang2f7ef2f2014-04-25 13:54:06 -07002019 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
Alexander Aring7306db32018-01-18 11:20:51 -05002020 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002021 rtnl_held, extack);
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002022 if (err == 0) {
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002023 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002024 RTM_NEWTFILTER, false, rtnl_held);
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002025 tfilter_put(tp, fh);
Vlad Buslov503d81d2019-07-21 17:44:12 +03002026 /* q pointer is NULL for shared blocks */
2027 if (q)
2028 q->flags &= ~TCQ_F_CAN_BYPASS;
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002029 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
2031errout:
Vlad Buslov8b646782019-02-11 10:55:41 +02002032 if (err && tp_created)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002033 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
Vlad Buslov726d06122019-02-11 10:55:42 +02002034errout_tp:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002035 if (chain) {
2036 if (tp && !IS_ERR(tp))
Vlad Buslov12db03b2019-02-11 10:55:45 +02002037 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002038 if (!tp_created)
2039 tcf_chain_put(chain);
2040 }
Vlad Buslov12db03b2019-02-11 10:55:45 +02002041 tcf_block_release(q, block, rtnl_held);
Vlad Buslov470502d2019-02-11 10:55:48 +02002042
2043 if (rtnl_held)
2044 rtnl_unlock();
2045
2046 if (err == -EAGAIN) {
2047 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2048 * of target chain.
2049 */
2050 rtnl_held = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 /* Replay the request. */
2052 goto replay;
Vlad Buslov470502d2019-02-11 10:55:48 +02002053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 return err;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002055
2056errout_locked:
2057 mutex_unlock(&chain->filter_chain_lock);
2058 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059}
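/* Example (illustrative): the RTM_NEWTFILTER requests handled above are what
 * the userspace tc utility emits for commands such as
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * where prio and protocol end up in tcm_info and "flower" selects TCA_KIND.
 */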
2060
Vlad Buslovc431f892018-05-31 09:52:53 +03002061static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2062 struct netlink_ext_ack *extack)
2063{
2064 struct net *net = sock_net(skb->sk);
2065 struct nlattr *tca[TCA_MAX + 1];
2066 struct tcmsg *t;
2067 u32 protocol;
2068 u32 prio;
2069 u32 parent;
2070 u32 chain_index;
2071 struct Qdisc *q = NULL;
2072 struct tcf_chain_info chain_info;
2073 struct tcf_chain *chain = NULL;
Vlad Buslov470502d2019-02-11 10:55:48 +02002074 struct tcf_block *block = NULL;
Vlad Buslovc431f892018-05-31 09:52:53 +03002075 struct tcf_proto *tp = NULL;
2076 unsigned long cl = 0;
2077 void *fh = NULL;
2078 int err;
Vlad Buslov470502d2019-02-11 10:55:48 +02002079 bool rtnl_held = false;
Vlad Buslovc431f892018-05-31 09:52:53 +03002080
2081 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2082 return -EPERM;
2083
Johannes Berg8cb08172019-04-26 14:07:28 +02002084 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2085 rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002086 if (err < 0)
2087 return err;
2088
2089 t = nlmsg_data(n);
2090 protocol = TC_H_MIN(t->tcm_info);
2091 prio = TC_H_MAJ(t->tcm_info);
2092 parent = t->tcm_parent;
2093
2094 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2095 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2096 return -ENOENT;
2097 }
2098
2099 /* Find head of filter chain. */
2100
Vlad Buslov470502d2019-02-11 10:55:48 +02002101 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2102 if (err)
2103 return err;
2104
2105 /* Take the rtnl mutex if flushing the whole chain, if the block is shared
2106 * (no qdisc found), if the qdisc is not unlocked, if the classifier type is
2107 * not specified, or if the classifier is not unlocked.
2108 */
2109 if (!prio ||
2110 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2111 !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2112 rtnl_held = true;
2113 rtnl_lock();
2114 }
2115
2116 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2117 if (err)
2118 goto errout;
2119
2120 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2121 extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002122 if (IS_ERR(block)) {
2123 err = PTR_ERR(block);
2124 goto errout;
2125 }
2126
2127 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2128 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2129 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2130 err = -EINVAL;
2131 goto errout;
2132 }
2133 chain = tcf_chain_get(block, chain_index, false);
2134 if (!chain) {
Jiri Pirko5ca8a252018-08-03 11:08:47 +02002135 /* User requested flush on non-existent chain. Nothing to do,
2136 * so just return success.
2137 */
2138 if (prio == 0) {
2139 err = 0;
2140 goto errout;
2141 }
Vlad Buslovc431f892018-05-31 09:52:53 +03002142 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Jiri Pirkob7b42472018-08-27 20:58:44 +02002143 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002144 goto errout;
2145 }
2146
2147 if (prio == 0) {
2148 tfilter_notify_chain(net, skb, block, q, parent, n,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002149 chain, RTM_DELTFILTER, rtnl_held);
2150 tcf_chain_flush(chain, rtnl_held);
Vlad Buslovc431f892018-05-31 09:52:53 +03002151 err = 0;
2152 goto errout;
2153 }
2154
Vlad Busloved76f5e2019-02-11 10:55:38 +02002155 mutex_lock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002156 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2157 prio, false);
2158 if (!tp || IS_ERR(tp)) {
2159 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03002160 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002161 goto errout_locked;
Vlad Buslovc431f892018-05-31 09:52:53 +03002162 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2163 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2164 err = -EINVAL;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002165 goto errout_locked;
2166 } else if (t->tcm_handle == 0) {
2167 tcf_chain_tp_remove(chain, &chain_info, tp);
2168 mutex_unlock(&chain->filter_chain_lock);
2169
Vlad Buslov12db03b2019-02-11 10:55:45 +02002170 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002171 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002172 RTM_DELTFILTER, false, rtnl_held);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002173 err = 0;
Vlad Buslovc431f892018-05-31 09:52:53 +03002174 goto errout;
2175 }
Vlad Busloved76f5e2019-02-11 10:55:38 +02002176 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002177
2178 fh = tp->ops->get(tp, t->tcm_handle);
2179
2180 if (!fh) {
Vlad Busloved76f5e2019-02-11 10:55:38 +02002181 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2182 err = -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002183 } else {
2184 bool last;
2185
2186 err = tfilter_del_notify(net, skb, n, tp, block,
2187 q, parent, fh, false, &last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002188 rtnl_held, extack);
2189
Vlad Buslovc431f892018-05-31 09:52:53 +03002190 if (err)
2191 goto errout;
Vlad Buslov8b646782019-02-11 10:55:41 +02002192 if (last)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002193 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002194 }
2195
2196errout:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002197 if (chain) {
2198 if (tp && !IS_ERR(tp))
Vlad Buslov12db03b2019-02-11 10:55:45 +02002199 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Buslovc431f892018-05-31 09:52:53 +03002200 tcf_chain_put(chain);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002201 }
Vlad Buslov12db03b2019-02-11 10:55:45 +02002202 tcf_block_release(q, block, rtnl_held);
Vlad Buslov470502d2019-02-11 10:55:48 +02002203
2204 if (rtnl_held)
2205 rtnl_unlock();
2206
Vlad Buslovc431f892018-05-31 09:52:53 +03002207 return err;
Vlad Busloved76f5e2019-02-11 10:55:38 +02002208
2209errout_locked:
2210 mutex_unlock(&chain->filter_chain_lock);
2211 goto errout;
Vlad Buslovc431f892018-05-31 09:52:53 +03002212}
2213
2214static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2215 struct netlink_ext_ack *extack)
2216{
2217 struct net *net = sock_net(skb->sk);
2218 struct nlattr *tca[TCA_MAX + 1];
2219 struct tcmsg *t;
2220 u32 protocol;
2221 u32 prio;
2222 u32 parent;
2223 u32 chain_index;
2224 struct Qdisc *q = NULL;
2225 struct tcf_chain_info chain_info;
2226 struct tcf_chain *chain = NULL;
Vlad Buslov470502d2019-02-11 10:55:48 +02002227 struct tcf_block *block = NULL;
Vlad Buslovc431f892018-05-31 09:52:53 +03002228 struct tcf_proto *tp = NULL;
2229 unsigned long cl = 0;
2230 void *fh = NULL;
2231 int err;
Vlad Buslov470502d2019-02-11 10:55:48 +02002232 bool rtnl_held = false;
Vlad Buslovc431f892018-05-31 09:52:53 +03002233
Johannes Berg8cb08172019-04-26 14:07:28 +02002234 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2235 rtm_tca_policy, extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002236 if (err < 0)
2237 return err;
2238
2239 t = nlmsg_data(n);
2240 protocol = TC_H_MIN(t->tcm_info);
2241 prio = TC_H_MAJ(t->tcm_info);
2242 parent = t->tcm_parent;
2243
2244 if (prio == 0) {
2245 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2246 return -ENOENT;
2247 }
2248
2249 /* Find head of filter chain. */
2250
Vlad Buslov470502d2019-02-11 10:55:48 +02002251 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2252 if (err)
2253 return err;
2254
2255 /* Take the rtnl mutex if the block is shared (no qdisc found), if the qdisc
2256 * is not unlocked, if the classifier type is not specified, or if the
2257 * classifier is not unlocked.
2258 */
2259 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2260 !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2261 rtnl_held = true;
2262 rtnl_lock();
2263 }
2264
2265 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2266 if (err)
2267 goto errout;
2268
2269 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2270 extack);
Vlad Buslovc431f892018-05-31 09:52:53 +03002271 if (IS_ERR(block)) {
2272 err = PTR_ERR(block);
2273 goto errout;
2274 }
2275
2276 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2277 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2278 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2279 err = -EINVAL;
2280 goto errout;
2281 }
2282 chain = tcf_chain_get(block, chain_index, false);
2283 if (!chain) {
2284 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2285 err = -EINVAL;
2286 goto errout;
2287 }
2288
Vlad Busloved76f5e2019-02-11 10:55:38 +02002289 mutex_lock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002290 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2291 prio, false);
Vlad Busloved76f5e2019-02-11 10:55:38 +02002292 mutex_unlock(&chain->filter_chain_lock);
Vlad Buslovc431f892018-05-31 09:52:53 +03002293 if (!tp || IS_ERR(tp)) {
2294 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
Vlad Buslov0e399032018-06-04 18:32:23 +03002295 err = tp ? PTR_ERR(tp) : -ENOENT;
Vlad Buslovc431f892018-05-31 09:52:53 +03002296 goto errout;
2297 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2298 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2299 err = -EINVAL;
2300 goto errout;
2301 }
2302
2303 fh = tp->ops->get(tp, t->tcm_handle);
2304
2305 if (!fh) {
2306 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2307 err = -ENOENT;
2308 } else {
2309 err = tfilter_notify(net, skb, n, tp, block, q, parent,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002310 fh, RTM_NEWTFILTER, true, rtnl_held);
Vlad Buslovc431f892018-05-31 09:52:53 +03002311 if (err < 0)
2312 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2313 }
2314
Vlad Buslov7d5509f2019-02-11 10:55:44 +02002315 tfilter_put(tp, fh);
Vlad Buslovc431f892018-05-31 09:52:53 +03002316errout:
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002317 if (chain) {
2318 if (tp && !IS_ERR(tp))
Vlad Buslov12db03b2019-02-11 10:55:45 +02002319 tcf_proto_put(tp, rtnl_held, NULL);
Vlad Buslovc431f892018-05-31 09:52:53 +03002320 tcf_chain_put(chain);
Vlad Buslov4dbfa762019-02-11 10:55:39 +02002321 }
Vlad Buslov12db03b2019-02-11 10:55:45 +02002322 tcf_block_release(q, block, rtnl_held);
Vlad Buslov470502d2019-02-11 10:55:48 +02002323
2324 if (rtnl_held)
2325 rtnl_unlock();
2326
Vlad Buslovc431f892018-05-31 09:52:53 +03002327 return err;
2328}
2329
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002330struct tcf_dump_args {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 struct tcf_walker w;
2332 struct sk_buff *skb;
2333 struct netlink_callback *cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002334 struct tcf_block *block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002335 struct Qdisc *q;
2336 u32 parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337};
2338
WANG Cong8113c092017-08-04 21:31:43 -07002339static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340{
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002341 struct tcf_dump_args *a = (void *)arg;
WANG Cong832d1d52014-01-09 16:14:01 -08002342 struct net *net = sock_net(a->skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002344 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002345 n, NETLINK_CB(a->cb->skb).portid,
Jamal Hadi Salim5a7a5552016-09-18 08:45:33 -04002346 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002347 RTM_NEWTFILTER, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348}
2349
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002350static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2351 struct sk_buff *skb, struct netlink_callback *cb,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002352 long index_start, long *p_index)
2353{
2354 struct net *net = sock_net(skb->sk);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002355 struct tcf_block *block = chain->block;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002356 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002357 struct tcf_proto *tp, *tp_prev;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002358 struct tcf_dump_args arg;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002359
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002360 for (tp = __tcf_get_next_proto(chain, NULL);
2361 tp;
2362 tp_prev = tp,
2363 tp = __tcf_get_next_proto(chain, tp),
Vlad Buslov12db03b2019-02-11 10:55:45 +02002364 tcf_proto_put(tp_prev, true, NULL),
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002365 (*p_index)++) {
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002366 if (*p_index < index_start)
2367 continue;
2368 if (TC_H_MAJ(tcm->tcm_info) &&
2369 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2370 continue;
2371 if (TC_H_MIN(tcm->tcm_info) &&
2372 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2373 continue;
2374 if (*p_index > index_start)
2375 memset(&cb->args[1], 0,
2376 sizeof(cb->args) - sizeof(cb->args[0]));
2377 if (cb->args[1] == 0) {
YueHaibing53189182018-07-17 20:58:14 +08002378 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002379 NETLINK_CB(cb->skb).portid,
2380 cb->nlh->nlmsg_seq, NLM_F_MULTI,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002381 RTM_NEWTFILTER, true) <= 0)
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002382 goto errout;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002383 cb->args[1] = 1;
2384 }
2385 if (!tp->ops->walk)
2386 continue;
2387 arg.w.fn = tcf_node_dump;
2388 arg.skb = skb;
2389 arg.cb = cb;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002390 arg.block = block;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002391 arg.q = q;
2392 arg.parent = parent;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002393 arg.w.stop = 0;
2394 arg.w.skip = cb->args[1] - 1;
2395 arg.w.count = 0;
Vlad Buslov01683a12018-07-09 13:29:11 +03002396 arg.w.cookie = cb->args[2];
Vlad Buslov12db03b2019-02-11 10:55:45 +02002397 tp->ops->walk(tp, &arg.w, true);
Vlad Buslov01683a12018-07-09 13:29:11 +03002398 cb->args[2] = arg.w.cookie;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002399 cb->args[1] = arg.w.count + 1;
2400 if (arg.w.stop)
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002401 goto errout;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002402 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002403 return true;
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002404
2405errout:
Vlad Buslov12db03b2019-02-11 10:55:45 +02002406 tcf_proto_put(tp, true, NULL);
Vlad Buslovfe2923a2019-02-11 10:55:40 +02002407 return false;
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002408}
2409
Eric Dumazetbd27a872009-11-05 20:57:26 -08002410/* called with RTNL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2412{
Vlad Buslovbbf73832019-02-11 10:55:36 +02002413 struct tcf_chain *chain, *chain_prev;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002414 struct net *net = sock_net(skb->sk);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002415 struct nlattr *tca[TCA_MAX + 1];
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002416 struct Qdisc *q = NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +02002417 struct tcf_block *block;
David S. Miller942b8162012-06-26 21:48:50 -07002418 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002419 long index_start;
2420 long index;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002421 u32 parent;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002422 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423
Hong zhi guo573ce262013-03-27 06:47:04 +00002424 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 return skb->len;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002426
Johannes Berg8cb08172019-04-26 14:07:28 +02002427 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2428 NULL, cb->extack);
Jiri Pirko5bc17012017-05-17 11:08:01 +02002429 if (err)
2430 return err;
2431
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002432 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002433 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002434 if (!block)
WANG Cong143976c2017-08-24 16:51:29 -07002435 goto out;
Jiri Pirkod680b352018-01-18 16:14:49 +01002436 /* If we work with a block index, q is NULL and the parent value
2437 * will never be used in the following code. The check
2438 * in tcf_fill_node prevents it. However, the compiler does not
2439 * see that far, so set parent to zero to silence the warning
2440 * about parent being uninitialized.
2441 */
2442 parent = 0;
Jiri Pirko7960d1d2018-01-17 11:46:51 +01002443 } else {
2444 const struct Qdisc_class_ops *cops;
2445 struct net_device *dev;
2446 unsigned long cl = 0;
2447
2448 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2449 if (!dev)
2450 return skb->len;
2451
2452 parent = tcm->tcm_parent;
2453 if (!parent) {
2454 q = dev->qdisc;
2455 parent = q->handle;
2456 } else {
2457 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2458 }
2459 if (!q)
2460 goto out;
2461 cops = q->ops->cl_ops;
2462 if (!cops)
2463 goto out;
2464 if (!cops->tcf_block)
2465 goto out;
2466 if (TC_H_MIN(tcm->tcm_parent)) {
2467 cl = cops->find(q, tcm->tcm_parent);
2468 if (cl == 0)
2469 goto out;
2470 }
2471 block = cops->tcf_block(q, cl, NULL);
2472 if (!block)
2473 goto out;
2474 if (tcf_block_shared(block))
2475 q = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002478 index_start = cb->args[0];
2479 index = 0;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002480
Vlad Buslovbbf73832019-02-11 10:55:36 +02002481 for (chain = __tcf_get_next_chain(block, NULL);
2482 chain;
2483 chain_prev = chain,
2484 chain = __tcf_get_next_chain(block, chain),
2485 tcf_chain_put(chain_prev)) {
Jiri Pirko5bc17012017-05-17 11:08:01 +02002486 if (tca[TCA_CHAIN] &&
2487 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2488 continue;
Jiri Pirkoa10fa202017-10-13 14:01:05 +02002489 if (!tcf_chain_dump(chain, q, parent, skb, cb,
Roman Kapl5ae437a2018-02-19 21:32:51 +01002490 index_start, &index)) {
Vlad Buslovbbf73832019-02-11 10:55:36 +02002491 tcf_chain_put(chain);
Roman Kapl5ae437a2018-02-19 21:32:51 +01002492 err = -EMSGSIZE;
Jiri Pirko5bc17012017-05-17 11:08:01 +02002493 break;
Roman Kapl5ae437a2018-02-19 21:32:51 +01002494 }
Jiri Pirko5bc17012017-05-17 11:08:01 +02002495 }
2496
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002497 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002498 tcf_block_refcnt_put(block, true);
Jiri Pirkoacb31fa2017-05-17 11:08:00 +02002499 cb->args[0] = index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501out:
Roman Kapl5ae437a2018-02-19 21:32:51 +01002502 /* If we made no progress, the error (EMSGSIZE) is real */
2503 if (skb->len == 0 && err)
2504 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 return skb->len;
2506}
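/* Dump progress is carried between netlink callbacks in cb->args: args[0]
 * holds the index of the last fully dumped filter, args[1] resumes the walk
 * inside a classifier, and args[2] is an opaque cookie a classifier may use
 * to continue its own walk.
 */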
2507
Vlad Buslova5654822019-02-11 10:55:37 +02002508static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2509 void *tmplt_priv, u32 chain_index,
2510 struct net *net, struct sk_buff *skb,
2511 struct tcf_block *block,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002512 u32 portid, u32 seq, u16 flags, int event)
2513{
2514 unsigned char *b = skb_tail_pointer(skb);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002515 const struct tcf_proto_ops *ops;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002516 struct nlmsghdr *nlh;
2517 struct tcmsg *tcm;
Jiri Pirko9f407f12018-07-23 09:23:07 +02002518 void *priv;
2519
Vlad Buslova5654822019-02-11 10:55:37 +02002520 ops = tmplt_ops;
2521 priv = tmplt_priv;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002522
2523 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2524 if (!nlh)
2525 goto out_nlmsg_trim;
2526 tcm = nlmsg_data(nlh);
2527 tcm->tcm_family = AF_UNSPEC;
2528 tcm->tcm__pad1 = 0;
2529 tcm->tcm__pad2 = 0;
2530 tcm->tcm_handle = 0;
2531 if (block->q) {
2532 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2533 tcm->tcm_parent = block->q->handle;
2534 } else {
2535 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2536 tcm->tcm_block_index = block->index;
2537 }
2538
Vlad Buslova5654822019-02-11 10:55:37 +02002539 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002540 goto nla_put_failure;
2541
Jiri Pirko9f407f12018-07-23 09:23:07 +02002542 if (ops) {
2543 if (nla_put_string(skb, TCA_KIND, ops->kind))
2544 goto nla_put_failure;
2545 if (ops->tmplt_dump(skb, net, priv) < 0)
2546 goto nla_put_failure;
2547 }
2548
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002549 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2550 return skb->len;
2551
2552out_nlmsg_trim:
2553nla_put_failure:
2554 nlmsg_trim(skb, b);
2555 return -EMSGSIZE;
2556}
2557
2558static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2559 u32 seq, u16 flags, int event, bool unicast)
2560{
2561 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2562 struct tcf_block *block = chain->block;
2563 struct net *net = block->net;
2564 struct sk_buff *skb;
Zhike Wang5b5f99b2019-03-11 03:15:54 -07002565 int err = 0;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002566
2567 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2568 if (!skb)
2569 return -ENOBUFS;
2570
Vlad Buslova5654822019-02-11 10:55:37 +02002571 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2572 chain->index, net, skb, block, portid,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002573 seq, flags, event) <= 0) {
2574 kfree_skb(skb);
2575 return -EINVAL;
2576 }
2577
2578 if (unicast)
Zhike Wang5b5f99b2019-03-11 03:15:54 -07002579 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2580 else
2581 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2582 flags & NLM_F_ECHO);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002583
Zhike Wang5b5f99b2019-03-11 03:15:54 -07002584 if (err > 0)
2585 err = 0;
2586 return err;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002587}
2588
Vlad Buslova5654822019-02-11 10:55:37 +02002589static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2590 void *tmplt_priv, u32 chain_index,
2591 struct tcf_block *block, struct sk_buff *oskb,
2592 u32 seq, u16 flags, bool unicast)
2593{
2594 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2595 struct net *net = block->net;
2596 struct sk_buff *skb;
2597
2598 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2599 if (!skb)
2600 return -ENOBUFS;
2601
2602 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2603 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2604 kfree_skb(skb);
2605 return -EINVAL;
2606 }
2607
2608 if (unicast)
2609 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2610
2611 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2612}
2613
Jiri Pirko9f407f12018-07-23 09:23:07 +02002614static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2615 struct nlattr **tca,
2616 struct netlink_ext_ack *extack)
2617{
2618 const struct tcf_proto_ops *ops;
2619 void *tmplt_priv;
2620
2621 /* If kind is not set, the user did not specify a template. */
2622 if (!tca[TCA_KIND])
2623 return 0;
2624
Vlad Buslov12db03b2019-02-11 10:55:45 +02002625 ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002626 if (IS_ERR(ops))
2627 return PTR_ERR(ops);
2628 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2629 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2630 return -EOPNOTSUPP;
2631 }
2632
2633 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2634 if (IS_ERR(tmplt_priv)) {
2635 module_put(ops->owner);
2636 return PTR_ERR(tmplt_priv);
2637 }
2638 chain->tmplt_ops = ops;
2639 chain->tmplt_priv = tmplt_priv;
2640 return 0;
2641}
2642
Vlad Buslova5654822019-02-11 10:55:37 +02002643static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2644 void *tmplt_priv)
Jiri Pirko9f407f12018-07-23 09:23:07 +02002645{
Jiri Pirko9f407f12018-07-23 09:23:07 +02002646	/* If template ops are not set, there is no template to destroy. */
Vlad Buslova5654822019-02-11 10:55:37 +02002647 if (!tmplt_ops)
Jiri Pirko9f407f12018-07-23 09:23:07 +02002648 return;
2649
Vlad Buslova5654822019-02-11 10:55:37 +02002650 tmplt_ops->tmplt_destroy(tmplt_priv);
2651 module_put(tmplt_ops->owner);
Jiri Pirko9f407f12018-07-23 09:23:07 +02002652}
2653
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002654/* Add/delete/get a chain */
2655
2656static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2657 struct netlink_ext_ack *extack)
2658{
2659 struct net *net = sock_net(skb->sk);
2660 struct nlattr *tca[TCA_MAX + 1];
2661 struct tcmsg *t;
2662 u32 parent;
2663 u32 chain_index;
2664 struct Qdisc *q = NULL;
2665 struct tcf_chain *chain = NULL;
2666 struct tcf_block *block;
2667 unsigned long cl;
2668 int err;
2669
2670 if (n->nlmsg_type != RTM_GETCHAIN &&
2671 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2672 return -EPERM;
2673
2674replay:
Johannes Berg8cb08172019-04-26 14:07:28 +02002675 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2676 rtm_tca_policy, extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002677 if (err < 0)
2678 return err;
2679
2680 t = nlmsg_data(n);
2681 parent = t->tcm_parent;
2682 cl = 0;
2683
2684 block = tcf_block_find(net, &q, &parent, &cl,
2685 t->tcm_ifindex, t->tcm_block_index, extack);
2686 if (IS_ERR(block))
2687 return PTR_ERR(block);
2688
2689 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2690 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2691 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002692 err = -EINVAL;
2693 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002694 }
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002695
2696 mutex_lock(&block->lock);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002697 chain = tcf_chain_lookup(block, chain_index);
2698 if (n->nlmsg_type == RTM_NEWCHAIN) {
2699 if (chain) {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002700 if (tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002701 /* The chain exists only because there is
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002702 * some action referencing it.
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002703 */
2704 tcf_chain_hold(chain);
2705 } else {
2706 NL_SET_ERR_MSG(extack, "Filter chain already exists");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002707 err = -EEXIST;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002708 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002709 }
2710 } else {
2711 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2712 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002713 err = -ENOENT;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002714 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002715 }
2716 chain = tcf_chain_create(block, chain_index);
2717 if (!chain) {
2718 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002719 err = -ENOMEM;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002720 goto errout_block_locked;
Jiri Pirko1f3ed382018-07-27 09:45:05 +02002721 }
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002722 }
2723 } else {
Jiri Pirko3d32f4c2018-08-01 12:36:55 +02002724 if (!chain || tcf_chain_held_by_acts_only(chain)) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002725 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
Vlad Buslove368fdb2018-09-24 19:22:53 +03002726 err = -EINVAL;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002727 goto errout_block_locked;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002728 }
2729 tcf_chain_hold(chain);
2730 }
2731
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002732 if (n->nlmsg_type == RTM_NEWCHAIN) {
2733 /* Modifying chain requires holding parent block lock. In case
2734 * the chain was successfully added, take a reference to the
2735 * chain. This ensures that an empty chain does not disappear at
2736 * the end of this function.
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002737 */
2738 tcf_chain_hold(chain);
2739 chain->explicitly_created = true;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002740 }
2741 mutex_unlock(&block->lock);
2742
2743 switch (n->nlmsg_type) {
2744 case RTM_NEWCHAIN:
2745 err = tc_chain_tmplt_add(chain, net, tca, extack);
2746 if (err) {
2747 tcf_chain_put_explicitly_created(chain);
2748 goto errout;
2749 }
2750
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002751 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2752 RTM_NEWCHAIN, false);
2753 break;
2754 case RTM_DELCHAIN:
Cong Wangf5b9bac2018-09-11 14:22:23 -07002755 tfilter_notify_chain(net, skb, block, q, parent, n,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002756 chain, RTM_DELTFILTER, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002757 /* Flush the chain first as the user requested chain removal. */
Vlad Buslov12db03b2019-02-11 10:55:45 +02002758 tcf_chain_flush(chain, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002759 /* In case the chain was successfully deleted, put a reference
2760 * to the chain previously taken during addition.
2761 */
2762 tcf_chain_put_explicitly_created(chain);
2763 break;
2764 case RTM_GETCHAIN:
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002765 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2766 n->nlmsg_seq, n->nlmsg_type, true);
2767 if (err < 0)
2768 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2769 break;
2770 default:
2771 err = -EOPNOTSUPP;
2772 NL_SET_ERR_MSG(extack, "Unsupported message type");
2773 goto errout;
2774 }
2775
2776errout:
2777 tcf_chain_put(chain);
Vlad Buslove368fdb2018-09-24 19:22:53 +03002778errout_block:
Vlad Buslov12db03b2019-02-11 10:55:45 +02002779 tcf_block_release(q, block, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002780 if (err == -EAGAIN)
2781 /* Replay the request. */
2782 goto replay;
2783 return err;
Vlad Buslov2cbfab02019-02-11 10:55:34 +02002784
2785errout_block_locked:
2786 mutex_unlock(&block->lock);
2787 goto errout_block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002788}
2789
2790/* called with RTNL */
2791static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2792{
2793 struct net *net = sock_net(skb->sk);
2794 struct nlattr *tca[TCA_MAX + 1];
2795 struct Qdisc *q = NULL;
2796 struct tcf_block *block;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002797 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Vlad Buslovace4a262019-02-25 17:45:44 +02002798 struct tcf_chain *chain;
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002799 long index_start;
2800 long index;
2801 u32 parent;
2802 int err;
2803
2804 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2805 return skb->len;
2806
Johannes Berg8cb08172019-04-26 14:07:28 +02002807 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2808 rtm_tca_policy, cb->extack);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002809 if (err)
2810 return err;
2811
2812 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002813 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002814 if (!block)
2815 goto out;
2816 /* If we work with block index, q is NULL and parent value
2817 * will never be used in the following code. The check
2818 * in tcf_fill_node prevents it. However, compiler does not
2819 * see that far, so set parent to zero to silence the warning
2820 * about parent being uninitialized.
2821 */
2822 parent = 0;
2823 } else {
2824 const struct Qdisc_class_ops *cops;
2825 struct net_device *dev;
2826 unsigned long cl = 0;
2827
2828 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2829 if (!dev)
2830 return skb->len;
2831
2832 parent = tcm->tcm_parent;
2833 if (!parent) {
2834 q = dev->qdisc;
2835 parent = q->handle;
2836 } else {
2837 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2838 }
2839 if (!q)
2840 goto out;
2841 cops = q->ops->cl_ops;
2842 if (!cops)
2843 goto out;
2844 if (!cops->tcf_block)
2845 goto out;
2846 if (TC_H_MIN(tcm->tcm_parent)) {
2847 cl = cops->find(q, tcm->tcm_parent);
2848 if (cl == 0)
2849 goto out;
2850 }
2851 block = cops->tcf_block(q, cl, NULL);
2852 if (!block)
2853 goto out;
2854 if (tcf_block_shared(block))
2855 q = NULL;
2856 }
2857
2858 index_start = cb->args[0];
2859 index = 0;
2860
Vlad Buslovace4a262019-02-25 17:45:44 +02002861 mutex_lock(&block->lock);
2862 list_for_each_entry(chain, &block->chain_list, list) {
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002863 if ((tca[TCA_CHAIN] &&
2864 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2865 continue;
2866 if (index < index_start) {
2867 index++;
2868 continue;
2869 }
Vlad Buslovace4a262019-02-25 17:45:44 +02002870 if (tcf_chain_held_by_acts_only(chain))
2871 continue;
Vlad Buslova5654822019-02-11 10:55:37 +02002872 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2873 chain->index, net, skb, block,
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002874 NETLINK_CB(cb->skb).portid,
2875 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2876 RTM_NEWCHAIN);
Vlad Buslovace4a262019-02-25 17:45:44 +02002877 if (err <= 0)
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002878 break;
2879 index++;
2880 }
Vlad Buslovace4a262019-02-25 17:45:44 +02002881 mutex_unlock(&block->lock);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002882
Vlad Buslov787ce6d2018-09-24 19:22:58 +03002883 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
Vlad Buslov12db03b2019-02-11 10:55:45 +02002884 tcf_block_refcnt_put(block, true);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02002885 cb->args[0] = index;
2886
2887out:
2888	/* If we made no progress, the error (EMSGSIZE) is real */
2889 if (skb->len == 0 && err)
2890 return err;
2891 return skb->len;
2892}
2893
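/* Release all actions held in an exts container. A no-op when
 * CONFIG_NET_CLS_ACT is disabled.
 */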
WANG Cong18d02642014-09-25 10:26:37 -07002894void tcf_exts_destroy(struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895{
2896#ifdef CONFIG_NET_CLS_ACT
Vlad Buslov90b73b72018-07-05 17:24:33 +03002897 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
WANG Cong22dc13c2016-08-13 22:35:00 -07002898 kfree(exts->actions);
2899 exts->nr_actions = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900#endif
2901}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002902EXPORT_SYMBOL(tcf_exts_destroy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
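/* Parse and instantiate the actions attached to a filter change request.
 * Both the legacy single "police" attribute and the newer action list are
 * accepted; the resulting actions are stored in @exts. Without
 * CONFIG_NET_CLS_ACT, any action attribute is rejected with -EOPNOTSUPP.
 */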
Benjamin LaHaisec1b52732013-01-14 05:15:39 +00002904int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
Alexander Aring50a56192018-01-18 11:20:52 -05002905 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
Vlad Buslovec6743a2019-02-11 10:55:43 +02002906 bool rtnl_held, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908#ifdef CONFIG_NET_CLS_ACT
2909 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910 struct tc_action *act;
Roman Mashakd04e6992018-03-08 16:59:17 -05002911 size_t attr_size = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
WANG Cong5da57f42013-12-15 20:15:07 -08002913 if (exts->police && tb[exts->police]) {
Jiri Pirko9fb9f252017-05-17 11:08:02 +02002914 act = tcf_action_init_1(net, tp, tb[exts->police],
2915 rate_tlv, "police", ovr,
Vlad Buslovec6743a2019-02-11 10:55:43 +02002916 TCA_ACT_BIND, rtnl_held,
2917 extack);
Patrick McHardyab27cfb2008-01-23 20:33:13 -08002918 if (IS_ERR(act))
2919 return PTR_ERR(act);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920
WANG Cong33be6272013-12-15 20:15:05 -08002921 act->type = exts->type = TCA_OLD_COMPAT;
WANG Cong22dc13c2016-08-13 22:35:00 -07002922 exts->actions[0] = act;
2923 exts->nr_actions = 1;
WANG Cong5da57f42013-12-15 20:15:07 -08002924 } else if (exts->action && tb[exts->action]) {
Vlad Buslov90b73b72018-07-05 17:24:33 +03002925 int err;
WANG Cong22dc13c2016-08-13 22:35:00 -07002926
Jiri Pirko9fb9f252017-05-17 11:08:02 +02002927 err = tcf_action_init(net, tp, tb[exts->action],
2928 rate_tlv, NULL, ovr, TCA_ACT_BIND,
Vlad Buslovec6743a2019-02-11 10:55:43 +02002929 exts->actions, &attr_size,
2930 rtnl_held, extack);
Vlad Buslov90b73b72018-07-05 17:24:33 +03002931 if (err < 0)
WANG Cong33be6272013-12-15 20:15:05 -08002932 return err;
Vlad Buslov90b73b72018-07-05 17:24:33 +03002933 exts->nr_actions = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 }
2935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936#else
WANG Cong5da57f42013-12-15 20:15:07 -08002937 if ((exts->action && tb[exts->action]) ||
Alexander Aring50a56192018-01-18 11:20:52 -05002938 (exts->police && tb[exts->police])) {
2939 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 return -EOPNOTSUPP;
Alexander Aring50a56192018-01-18 11:20:52 -05002941 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942#endif
2943
2944 return 0;
2945}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002946EXPORT_SYMBOL(tcf_exts_validate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947
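/* Replace the actions of @dst with those of @src and free the ones
 * previously held by @dst.
 */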
Jiri Pirko9b0d4442017-08-04 14:29:15 +02002948void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949{
2950#ifdef CONFIG_NET_CLS_ACT
WANG Cong22dc13c2016-08-13 22:35:00 -07002951 struct tcf_exts old = *dst;
2952
Jiri Pirko9b0d4442017-08-04 14:29:15 +02002953 *dst = *src;
WANG Cong22dc13c2016-08-13 22:35:00 -07002954 tcf_exts_destroy(&old);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955#endif
2956}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08002957EXPORT_SYMBOL(tcf_exts_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958
WANG Cong22dc13c2016-08-13 22:35:00 -07002959#ifdef CONFIG_NET_CLS_ACT
2960static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
2961{
2962 if (exts->nr_actions == 0)
2963 return NULL;
2964 else
2965 return exts->actions[0];
2966}
2967#endif
WANG Cong33be6272013-12-15 20:15:05 -08002968
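/* Dump a filter's actions into a netlink message, using either the nested
 * action list or the legacy single "police" attribute, depending on how the
 * exts were configured.
 */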
WANG Cong5da57f42013-12-15 20:15:07 -08002969int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970{
2971#ifdef CONFIG_NET_CLS_ACT
Cong Wang9cc63db2014-07-16 14:25:30 -07002972 struct nlattr *nest;
2973
Jiri Pirko978dfd82017-08-04 14:29:03 +02002974 if (exts->action && tcf_exts_has_actions(exts)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 /*
2976 * again for backward compatible mode - we want
2977 * to work with both old and new modes of entering
2978 * tc data even if iproute2 was newer - jhs
2979 */
WANG Cong33be6272013-12-15 20:15:05 -08002980 if (exts->type != TCA_OLD_COMPAT) {
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002981 nest = nla_nest_start_noflag(skb, exts->action);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002982 if (nest == NULL)
2983 goto nla_put_failure;
WANG Cong22dc13c2016-08-13 22:35:00 -07002984
Vlad Buslov90b73b72018-07-05 17:24:33 +03002985 if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -08002986 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002987 nla_nest_end(skb, nest);
WANG Cong5da57f42013-12-15 20:15:07 -08002988 } else if (exts->police) {
WANG Cong33be6272013-12-15 20:15:05 -08002989 struct tc_action *act = tcf_exts_first_act(exts);
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002990 nest = nla_nest_start_noflag(skb, exts->police);
Jamal Hadi Salim63acd682013-12-23 08:02:12 -05002991 if (nest == NULL || !act)
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002992 goto nla_put_failure;
WANG Cong33be6272013-12-15 20:15:05 -08002993 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -08002994 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08002995 nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 }
2997 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 return 0;
Cong Wang9cc63db2014-07-16 14:25:30 -07002999
3000nla_put_failure:
3001 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 return -1;
Cong Wang9cc63db2014-07-16 14:25:30 -07003003#else
3004 return 0;
3005#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08003007EXPORT_SYMBOL(tcf_exts_dump);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08003009
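/* Dump statistics of the first (legacy-style) action, if any. */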
WANG Cong5da57f42013-12-15 20:15:07 -08003010int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011{
3012#ifdef CONFIG_NET_CLS_ACT
WANG Cong33be6272013-12-15 20:15:05 -08003013 struct tc_action *a = tcf_exts_first_act(exts);
Ignacy Gawędzkib057df22015-02-03 19:05:18 +01003014 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
WANG Cong33be6272013-12-15 20:15:05 -08003015 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016#endif
3017 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018}
Stephen Hemmingeraa767bf2008-01-21 02:26:41 -08003019EXPORT_SYMBOL(tcf_exts_dump_stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020
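/* Mark or unmark a filter as present in hardware (TCA_CLS_FLAGS_IN_HW) and
 * keep the block-wide offload counter in sync.
 */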
Vlad Buslov40119212019-08-26 16:44:59 +03003021static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3022{
3023 if (*flags & TCA_CLS_FLAGS_IN_HW)
3024 return;
3025 *flags |= TCA_CLS_FLAGS_IN_HW;
3026 atomic_inc(&block->offloadcnt);
3027}
3028
3029static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3030{
3031 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3032 return;
3033 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3034 atomic_dec(&block->offloadcnt);
3035}
3036
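/* Adjust a filter's in-hardware counter by @diff and update the block-level
 * offload accounting accordingly. tp->lock protects the per-filter state;
 * the caller must already hold block->cb_lock.
 */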
3037static void tc_cls_offload_cnt_update(struct tcf_block *block,
3038 struct tcf_proto *tp, u32 *cnt,
3039 u32 *flags, u32 diff, bool add)
3040{
3041 lockdep_assert_held(&block->cb_lock);
3042
3043 spin_lock(&tp->lock);
3044 if (add) {
3045 if (!*cnt)
3046 tcf_block_offload_inc(block, flags);
3047 *cnt += diff;
3048 } else {
3049 *cnt -= diff;
3050 if (!*cnt)
3051 tcf_block_offload_dec(block, flags);
3052 }
3053 spin_unlock(&tp->lock);
3054}
3055
3056static void
3057tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3058 u32 *cnt, u32 *flags)
3059{
3060 lockdep_assert_held(&block->cb_lock);
3061
3062 spin_lock(&tp->lock);
3063 tcf_block_offload_dec(block, flags);
3064 *cnt = 0;
3065 spin_unlock(&tp->lock);
3066}
3067
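/* Invoke every callback registered on the block with the given offload
 * request. Returns the number of callbacks that succeeded, or the first
 * error if @err_stop is set.
 */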
3068static int
3069__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3070 void *type_data, bool err_stop)
Jiri Pirko717503b2017-10-11 09:41:09 +02003071{
Pablo Neira Ayuso955bcb62019-07-09 22:55:46 +02003072 struct flow_block_cb *block_cb;
Cong Wangaeb3fec2018-12-11 11:15:46 -08003073 int ok_count = 0;
3074 int err;
3075
Vlad Buslov40119212019-08-26 16:44:59 +03003076 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3077 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3078 if (err) {
3079 if (err_stop)
3080 return err;
3081 } else {
3082 ok_count++;
3083 }
3084 }
3085 return ok_count;
3086}
3087
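/* Call the block callbacks without touching offload counters; the
 * tc_setup_cb_add/replace/destroy() helpers below additionally maintain the
 * per-filter and per-block counters. If the block is bound to devices that
 * require rtnl (lockeddevcnt), rtnl is taken before cb_lock to match the
 * lock order of the block bind code.
 */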
3088int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3089 void *type_data, bool err_stop, bool rtnl_held)
3090{
Vlad Buslov11bd6342019-08-26 16:45:02 +03003091 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
Vlad Buslov40119212019-08-26 16:44:59 +03003092 int ok_count;
3093
Vlad Buslov11bd6342019-08-26 16:45:02 +03003094retry:
3095 if (take_rtnl)
3096 rtnl_lock();
Vlad Buslov40119212019-08-26 16:44:59 +03003097 down_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003098 /* Need to obtain rtnl lock if block is bound to devs that require it.
3099 * In block bind code cb_lock is obtained while holding rtnl, so we must
3100	 * obtain the locks in the same order here.
3101 */
3102 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3103 up_read(&block->cb_lock);
3104 take_rtnl = true;
3105 goto retry;
3106 }
3107
Vlad Buslov40119212019-08-26 16:44:59 +03003108 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003109
Vlad Buslov40119212019-08-26 16:44:59 +03003110 up_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003111 if (take_rtnl)
3112 rtnl_unlock();
Vlad Buslov40119212019-08-26 16:44:59 +03003113 return ok_count;
3114}
3115EXPORT_SYMBOL(tc_setup_cb_call);
3116
3117/* Non-destructive filter add. If filter that wasn't already in hardware is
3118 * successfully offloaded, increment block offloads counter. On failure,
3119 * previously offloaded filter is considered to be intact and offloads counter
3120 * is not decremented.
3121 */
3122
3123int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3124 enum tc_setup_type type, void *type_data, bool err_stop,
3125 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3126{
Vlad Buslov11bd6342019-08-26 16:45:02 +03003127 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
Vlad Buslov40119212019-08-26 16:44:59 +03003128 int ok_count;
3129
Vlad Buslov11bd6342019-08-26 16:45:02 +03003130retry:
3131 if (take_rtnl)
3132 rtnl_lock();
Vlad Buslov4f8116c2019-08-26 16:44:57 +03003133 down_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003134 /* Need to obtain rtnl lock if block is bound to devs that require it.
3135 * In block bind code cb_lock is obtained while holding rtnl, so we must
3136	 * obtain the locks in the same order here.
3137 */
3138 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3139 up_read(&block->cb_lock);
3140 take_rtnl = true;
3141 goto retry;
3142 }
3143
Cong Wangaeb3fec2018-12-11 11:15:46 -08003144 /* Make sure all netdevs sharing this block are offload-capable. */
Vlad Buslov4f8116c2019-08-26 16:44:57 +03003145 if (block->nooffloaddevcnt && err_stop) {
3146 ok_count = -EOPNOTSUPP;
3147 goto err_unlock;
3148 }
Cong Wangaeb3fec2018-12-11 11:15:46 -08003149
Vlad Buslov40119212019-08-26 16:44:59 +03003150 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003151 if (ok_count < 0)
3152 goto err_unlock;
3153
3154 if (tp->ops->hw_add)
3155 tp->ops->hw_add(tp, type_data);
Vlad Buslov40119212019-08-26 16:44:59 +03003156 if (ok_count > 0)
3157 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3158 ok_count, true);
Vlad Buslov4f8116c2019-08-26 16:44:57 +03003159err_unlock:
3160 up_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003161 if (take_rtnl)
3162 rtnl_unlock();
Vlad Buslov40119212019-08-26 16:44:59 +03003163 return ok_count < 0 ? ok_count : 0;
Jiri Pirko717503b2017-10-11 09:41:09 +02003164}
Vlad Buslov40119212019-08-26 16:44:59 +03003165EXPORT_SYMBOL(tc_setup_cb_add);
3166
3167/* Destructive filter replace. If filter that wasn't already in hardware is
3168 * successfully offloaded, increment block offload counter. On failure,
3169 * previously offloaded filter is considered to be destroyed and offload counter
3170 * is decremented.
3171 */
3172
3173int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3174 enum tc_setup_type type, void *type_data, bool err_stop,
3175 u32 *old_flags, unsigned int *old_in_hw_count,
3176 u32 *new_flags, unsigned int *new_in_hw_count,
3177 bool rtnl_held)
3178{
Vlad Buslov11bd6342019-08-26 16:45:02 +03003179 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
Vlad Buslov40119212019-08-26 16:44:59 +03003180 int ok_count;
3181
Vlad Buslov11bd6342019-08-26 16:45:02 +03003182retry:
3183 if (take_rtnl)
3184 rtnl_lock();
Vlad Buslov40119212019-08-26 16:44:59 +03003185 down_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003186 /* Need to obtain rtnl lock if block is bound to devs that require it.
3187 * In block bind code cb_lock is obtained while holding rtnl, so we must
3188	 * obtain the locks in the same order here.
3189 */
3190 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3191 up_read(&block->cb_lock);
3192 take_rtnl = true;
3193 goto retry;
3194 }
3195
Vlad Buslov40119212019-08-26 16:44:59 +03003196 /* Make sure all netdevs sharing this block are offload-capable. */
3197 if (block->nooffloaddevcnt && err_stop) {
3198 ok_count = -EOPNOTSUPP;
3199 goto err_unlock;
3200 }
3201
3202 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003203 if (tp->ops->hw_del)
3204 tp->ops->hw_del(tp, type_data);
Vlad Buslov40119212019-08-26 16:44:59 +03003205
3206 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003207 if (ok_count < 0)
3208 goto err_unlock;
3209
3210 if (tp->ops->hw_add)
3211 tp->ops->hw_add(tp, type_data);
Vlad Buslov40119212019-08-26 16:44:59 +03003212 if (ok_count > 0)
Vlad Buslova449a3e2019-08-26 16:45:00 +03003213 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3214 new_flags, ok_count, true);
Vlad Buslov40119212019-08-26 16:44:59 +03003215err_unlock:
3216 up_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003217 if (take_rtnl)
3218 rtnl_unlock();
Vlad Buslov40119212019-08-26 16:44:59 +03003219 return ok_count < 0 ? ok_count : 0;
3220}
3221EXPORT_SYMBOL(tc_setup_cb_replace);
3222
3223/* Destroy filter and decrement block offload counter, if filter was previously
3224 * offloaded.
3225 */
3226
3227int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3228 enum tc_setup_type type, void *type_data, bool err_stop,
3229 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3230{
Vlad Buslov11bd6342019-08-26 16:45:02 +03003231 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
Vlad Buslov40119212019-08-26 16:44:59 +03003232 int ok_count;
3233
Vlad Buslov11bd6342019-08-26 16:45:02 +03003234retry:
3235 if (take_rtnl)
3236 rtnl_lock();
Vlad Buslov40119212019-08-26 16:44:59 +03003237 down_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003238 /* Need to obtain rtnl lock if block is bound to devs that require it.
3239 * In block bind code cb_lock is obtained while holding rtnl, so we must
3240	 * obtain the locks in the same order here.
3241 */
3242 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3243 up_read(&block->cb_lock);
3244 take_rtnl = true;
3245 goto retry;
3246 }
3247
Vlad Buslov40119212019-08-26 16:44:59 +03003248 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3249
3250 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
Vlad Buslova449a3e2019-08-26 16:45:00 +03003251 if (tp->ops->hw_del)
3252 tp->ops->hw_del(tp, type_data);
3253
Vlad Buslov40119212019-08-26 16:44:59 +03003254 up_read(&block->cb_lock);
Vlad Buslov11bd6342019-08-26 16:45:02 +03003255 if (take_rtnl)
3256 rtnl_unlock();
Vlad Buslov40119212019-08-26 16:44:59 +03003257 return ok_count < 0 ? ok_count : 0;
3258}
3259EXPORT_SYMBOL(tc_setup_cb_destroy);
3260
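/* Replay a single filter to one callback, typically while (un)registering a
 * callback on a bound block, and adjust the offload counters to match the
 * result.
 */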
3261int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3262 bool add, flow_setup_cb_t *cb,
3263 enum tc_setup_type type, void *type_data,
3264 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3265{
3266 int err = cb(type, type_data, cb_priv);
3267
3268 if (err) {
3269 if (add && tc_skip_sw(*flags))
3270 return err;
3271 } else {
3272 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3273 add);
3274 }
3275
3276 return 0;
3277}
3278EXPORT_SYMBOL(tc_setup_cb_reoffload);
Jiri Pirkob3f55bd2017-10-11 09:41:08 +02003279
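/* Release the resources (netdev references, tunnel metadata, psample groups)
 * that tc_setup_flow_action() attached to the flow_action entries via their
 * destructor callbacks.
 */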
Vlad Buslov5a6ff4b2019-08-26 16:45:04 +03003280void tc_cleanup_flow_action(struct flow_action *flow_action)
3281{
3282 struct flow_action_entry *entry;
3283 int i;
3284
Vlad Buslov11589582019-09-13 18:28:39 +03003285 flow_action_for_each(i, entry, flow_action)
3286 if (entry->destructor)
3287 entry->destructor(entry->destructor_priv);
Vlad Buslov5a6ff4b2019-08-26 16:45:04 +03003288}
3289EXPORT_SYMBOL(tc_cleanup_flow_action);
3290
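/* The translation helpers below take a reference on the resource behind an
 * action (target netdev, tunnel info, psample group) and record a destructor
 * so that tc_cleanup_flow_action() can drop it again.
 */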
Vlad Buslov11589582019-09-13 18:28:39 +03003291static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3292 const struct tc_action *act)
3293{
Vlad Buslov470d5062019-09-13 18:28:41 +03003294#ifdef CONFIG_NET_CLS_ACT
3295 entry->dev = act->ops->get_dev(act, &entry->destructor);
Vlad Buslov11589582019-09-13 18:28:39 +03003296 if (!entry->dev)
3297 return;
Vlad Buslov11589582019-09-13 18:28:39 +03003298 entry->destructor_priv = entry->dev;
Vlad Buslov470d5062019-09-13 18:28:41 +03003299#endif
Vlad Buslov11589582019-09-13 18:28:39 +03003300}
3301
3302static void tcf_tunnel_encap_put_tunnel(void *priv)
3303{
3304 struct ip_tunnel_info *tunnel = priv;
3305
3306 kfree(tunnel);
3307}
3308
3309static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3310 const struct tc_action *act)
3311{
3312 entry->tunnel = tcf_tunnel_info_copy(act);
3313 if (!entry->tunnel)
3314 return -ENOMEM;
3315 entry->destructor = tcf_tunnel_encap_put_tunnel;
3316 entry->destructor_priv = entry->tunnel;
3317 return 0;
3318}
3319
Vlad Buslov4a5da472019-09-13 18:28:40 +03003320static void tcf_sample_get_group(struct flow_action_entry *entry,
3321 const struct tc_action *act)
3322{
3323#ifdef CONFIG_NET_CLS_ACT
3324 entry->sample.psample_group =
3325 act->ops->get_psample_group(act, &entry->destructor);
3326 entry->destructor_priv = entry->sample.psample_group;
3327#endif
3328}
3329
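/* Translate the actions of a filter's exts into the driver-facing
 * flow_action representation. The flow_action must provide enough entries,
 * see tcf_exts_num_actions(); unsupported actions fail the whole translation
 * with -EOPNOTSUPP. Takes rtnl internally unless @rtnl_held indicates the
 * caller already holds it.
 */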
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003330int tc_setup_flow_action(struct flow_action *flow_action,
Vlad Buslov9838b202019-08-26 16:45:03 +03003331 const struct tcf_exts *exts, bool rtnl_held)
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003332{
3333 const struct tc_action *act;
Vlad Buslov9838b202019-08-26 16:45:03 +03003334 int i, j, k, err = 0;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003335
3336 if (!exts)
3337 return 0;
3338
Vlad Buslov9838b202019-08-26 16:45:03 +03003339 if (!rtnl_held)
3340 rtnl_lock();
3341
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003342 j = 0;
3343 tcf_exts_for_each_action(i, act, exts) {
3344 struct flow_action_entry *entry;
3345
3346 entry = &flow_action->entries[j];
3347 if (is_tcf_gact_ok(act)) {
3348 entry->id = FLOW_ACTION_ACCEPT;
3349 } else if (is_tcf_gact_shot(act)) {
3350 entry->id = FLOW_ACTION_DROP;
3351 } else if (is_tcf_gact_trap(act)) {
3352 entry->id = FLOW_ACTION_TRAP;
3353 } else if (is_tcf_gact_goto_chain(act)) {
3354 entry->id = FLOW_ACTION_GOTO;
3355 entry->chain_index = tcf_gact_goto_chain_index(act);
3356 } else if (is_tcf_mirred_egress_redirect(act)) {
3357 entry->id = FLOW_ACTION_REDIRECT;
Vlad Buslov11589582019-09-13 18:28:39 +03003358 tcf_mirred_get_dev(entry, act);
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003359 } else if (is_tcf_mirred_egress_mirror(act)) {
3360 entry->id = FLOW_ACTION_MIRRED;
Vlad Buslov11589582019-09-13 18:28:39 +03003361 tcf_mirred_get_dev(entry, act);
John Hurley48e584a2019-08-04 16:09:06 +01003362 } else if (is_tcf_mirred_ingress_redirect(act)) {
3363 entry->id = FLOW_ACTION_REDIRECT_INGRESS;
Vlad Buslov11589582019-09-13 18:28:39 +03003364 tcf_mirred_get_dev(entry, act);
John Hurley48e584a2019-08-04 16:09:06 +01003365 } else if (is_tcf_mirred_ingress_mirror(act)) {
3366 entry->id = FLOW_ACTION_MIRRED_INGRESS;
Vlad Buslov11589582019-09-13 18:28:39 +03003367 tcf_mirred_get_dev(entry, act);
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003368 } else if (is_tcf_vlan(act)) {
3369 switch (tcf_vlan_action(act)) {
3370 case TCA_VLAN_ACT_PUSH:
3371 entry->id = FLOW_ACTION_VLAN_PUSH;
3372 entry->vlan.vid = tcf_vlan_push_vid(act);
3373 entry->vlan.proto = tcf_vlan_push_proto(act);
3374 entry->vlan.prio = tcf_vlan_push_prio(act);
3375 break;
3376 case TCA_VLAN_ACT_POP:
3377 entry->id = FLOW_ACTION_VLAN_POP;
3378 break;
3379 case TCA_VLAN_ACT_MODIFY:
3380 entry->id = FLOW_ACTION_VLAN_MANGLE;
3381 entry->vlan.vid = tcf_vlan_push_vid(act);
3382 entry->vlan.proto = tcf_vlan_push_proto(act);
3383 entry->vlan.prio = tcf_vlan_push_prio(act);
3384 break;
3385 default:
Vlad Buslov9838b202019-08-26 16:45:03 +03003386 err = -EOPNOTSUPP;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003387 goto err_out;
3388 }
3389 } else if (is_tcf_tunnel_set(act)) {
3390 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
Vlad Buslov11589582019-09-13 18:28:39 +03003391 err = tcf_tunnel_encap_get_tunnel(entry, act);
3392 if (err)
Vlad Buslov1444c172019-08-26 16:45:05 +03003393 goto err_out;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003394 } else if (is_tcf_tunnel_release(act)) {
3395 entry->id = FLOW_ACTION_TUNNEL_DECAP;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003396 } else if (is_tcf_pedit(act)) {
3397 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3398 switch (tcf_pedit_cmd(act, k)) {
3399 case TCA_PEDIT_KEY_EX_CMD_SET:
3400 entry->id = FLOW_ACTION_MANGLE;
3401 break;
3402 case TCA_PEDIT_KEY_EX_CMD_ADD:
3403 entry->id = FLOW_ACTION_ADD;
3404 break;
3405 default:
Vlad Buslov9838b202019-08-26 16:45:03 +03003406 err = -EOPNOTSUPP;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003407 goto err_out;
3408 }
3409 entry->mangle.htype = tcf_pedit_htype(act, k);
3410 entry->mangle.mask = tcf_pedit_mask(act, k);
3411 entry->mangle.val = tcf_pedit_val(act, k);
3412 entry->mangle.offset = tcf_pedit_offset(act, k);
3413 entry = &flow_action->entries[++j];
3414 }
3415 } else if (is_tcf_csum(act)) {
3416 entry->id = FLOW_ACTION_CSUM;
3417 entry->csum_flags = tcf_csum_update_flags(act);
3418 } else if (is_tcf_skbedit_mark(act)) {
3419 entry->id = FLOW_ACTION_MARK;
3420 entry->mark = tcf_skbedit_mark(act);
Pieter Jansen van Vuurena7a7be62019-05-04 04:46:16 -07003421 } else if (is_tcf_sample(act)) {
3422 entry->id = FLOW_ACTION_SAMPLE;
Pieter Jansen van Vuurena7a7be62019-05-04 04:46:16 -07003423 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3424 entry->sample.truncate = tcf_sample_truncate(act);
3425 entry->sample.rate = tcf_sample_rate(act);
Vlad Buslov4a5da472019-09-13 18:28:40 +03003426 tcf_sample_get_group(entry, act);
Pieter Jansen van Vuuren8c8cfc62019-05-04 04:46:22 -07003427 } else if (is_tcf_police(act)) {
3428 entry->id = FLOW_ACTION_POLICE;
3429 entry->police.burst = tcf_police_tcfp_burst(act);
3430 entry->police.rate_bytes_ps =
3431 tcf_police_rate_bytes_ps(act);
Paul Blakeyb57dc7c2019-07-09 10:30:48 +03003432 } else if (is_tcf_ct(act)) {
3433 entry->id = FLOW_ACTION_CT;
3434 entry->ct.action = tcf_ct_action(act);
3435 entry->ct.zone = tcf_ct_zone(act);
John Hurley6749d5902019-07-23 15:33:59 +01003436 } else if (is_tcf_mpls(act)) {
3437 switch (tcf_mpls_action(act)) {
3438 case TCA_MPLS_ACT_PUSH:
3439 entry->id = FLOW_ACTION_MPLS_PUSH;
3440 entry->mpls_push.proto = tcf_mpls_proto(act);
3441 entry->mpls_push.label = tcf_mpls_label(act);
3442 entry->mpls_push.tc = tcf_mpls_tc(act);
3443 entry->mpls_push.bos = tcf_mpls_bos(act);
3444 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3445 break;
3446 case TCA_MPLS_ACT_POP:
3447 entry->id = FLOW_ACTION_MPLS_POP;
3448 entry->mpls_pop.proto = tcf_mpls_proto(act);
3449 break;
3450 case TCA_MPLS_ACT_MODIFY:
3451 entry->id = FLOW_ACTION_MPLS_MANGLE;
3452 entry->mpls_mangle.label = tcf_mpls_label(act);
3453 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3454 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3455 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3456 break;
3457 default:
3458				err = -EOPNOTSUPP;
				goto err_out;
3459 }
John Hurleyfb1b7752019-08-04 16:09:04 +01003460 } else if (is_tcf_skbedit_ptype(act)) {
3461 entry->id = FLOW_ACTION_PTYPE;
3462 entry->ptype = tcf_skbedit_ptype(act);
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003463 } else {
Vlad Buslov9838b202019-08-26 16:45:03 +03003464 err = -EOPNOTSUPP;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003465 goto err_out;
3466 }
3467
3468 if (!is_tcf_pedit(act))
3469 j++;
3470 }
Vlad Buslov9838b202019-08-26 16:45:03 +03003471
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003472err_out:
Vlad Buslov9838b202019-08-26 16:45:03 +03003473 if (!rtnl_held)
3474 rtnl_unlock();
3475
Vlad Buslov5a6ff4b2019-08-26 16:45:04 +03003476 if (err)
3477 tc_cleanup_flow_action(flow_action);
3478
Vlad Buslov9838b202019-08-26 16:45:03 +03003479 return err;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +01003480}
3481EXPORT_SYMBOL(tc_setup_flow_action);
3482
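/* Number of flow_action entries needed to represent the actions in @exts;
 * pedit expands to one entry per key.
 */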
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01003483unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3484{
3485 unsigned int num_acts = 0;
3486 struct tc_action *act;
3487 int i;
3488
3489 tcf_exts_for_each_action(i, act, exts) {
3490 if (is_tcf_pedit(act))
3491 num_acts += tcf_pedit_nkeys(act);
3492 else
3493 num_acts++;
3494 }
3495 return num_acts;
3496}
3497EXPORT_SYMBOL(tcf_exts_num_actions);
3498
Jiri Pirko48617382018-01-17 11:46:46 +01003499static __net_init int tcf_net_init(struct net *net)
3500{
3501 struct tcf_net *tn = net_generic(net, tcf_net_id);
3502
Vlad Buslovab281622018-09-24 19:22:56 +03003503 spin_lock_init(&tn->idr_lock);
Jiri Pirko48617382018-01-17 11:46:46 +01003504 idr_init(&tn->idr);
3505 return 0;
3506}
3507
3508static void __net_exit tcf_net_exit(struct net *net)
3509{
3510 struct tcf_net *tn = net_generic(net, tcf_net_id);
3511
3512 idr_destroy(&tn->idr);
3513}
3514
3515static struct pernet_operations tcf_net_ops = {
3516 .init = tcf_net_init,
3517 .exit = tcf_net_exit,
3518 .id = &tcf_net_id,
3519 .size = sizeof(struct tcf_net),
3520};
3521
wenxu1150ab02019-08-07 09:13:53 +08003522static struct flow_indr_block_ing_entry block_ing_entry = {
3523 .cb = tc_indr_block_get_and_ing_cmd,
3524 .list = LIST_HEAD_INIT(block_ing_entry.list),
3525};
3526
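/* Set up the filter workqueue, the per-netns block IDR, the indirect block
 * ingress callback and the rtnetlink handlers for filter and chain messages.
 */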
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527static int __init tc_filter_init(void)
3528{
Jiri Pirko48617382018-01-17 11:46:46 +01003529 int err;
3530
Cong Wang7aa00452017-10-26 18:24:28 -07003531 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3532 if (!tc_filter_wq)
3533 return -ENOMEM;
3534
Jiri Pirko48617382018-01-17 11:46:46 +01003535 err = register_pernet_subsys(&tcf_net_ops);
3536 if (err)
3537 goto err_register_pernet_subsys;
3538
wenxu1150ab02019-08-07 09:13:53 +08003539 flow_indr_add_block_ing_cb(&block_ing_entry);
3540
Vlad Buslov470502d2019-02-11 10:55:48 +02003541 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3542 RTNL_FLAG_DOIT_UNLOCKED);
3543 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3544 RTNL_FLAG_DOIT_UNLOCKED);
Vlad Buslovc431f892018-05-31 09:52:53 +03003545 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
Vlad Buslov470502d2019-02-11 10:55:48 +02003546 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
Jiri Pirko32a4f5e2018-07-23 09:23:06 +02003547 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3548 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3549 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3550 tc_dump_chain, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 return 0;
Jiri Pirko48617382018-01-17 11:46:46 +01003553
3554err_register_pernet_subsys:
3555 destroy_workqueue(tc_filter_wq);
3556 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557}
3558
3559subsys_initcall(tc_filter_init);