/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by its string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register (unregister) a new classifier type */

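/* A classifier module registers its struct tcf_proto_ops here, typically from
 * its module init routine, and unregisters it again on module exit.
 * Registration fails with -EEXIST if a classifier of the same kind is already
 * registered.
 */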
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

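/* Queue filter teardown work on the ordered tc filter workqueue, so that it
 * can be flushed in unregister_tcf_proto_ops() before a classifier module
 * goes away.
 */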
bool tcf_queue_work(struct work_struct *work)
{
	return queue_work(tc_filter_wq, work);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

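/* Allocate and initialize a proto-tcf instance of the given kind. If the
 * classifier module is not loaded yet, drop the RTNL lock, try to load it via
 * request_module() and ask the caller to replay the request (-EAGAIN).
 */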
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
		goto errout;
#endif
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

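/* Tear down a proto-tcf instance and release the module reference taken at
 * creation time; the memory is freed after a grace period via kfree_rcu().
 */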
static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

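/* Create a new filter chain and link it into the block's chain list. The
 * caller owns the initial reference (refcnt == 1).
 */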
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	if (chain->p_filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

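/* Look up a chain by index within a block and take a reference on it. When
 * "create" is true and no such chain exists, a new one is created instead of
 * returning NULL.
 */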
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

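/* Release a chain reference; the chain is destroyed once the last reference
 * is dropped.
 */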
void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
			       struct tcf_proto __rcu **p_filter_chain)
{
	chain->p_filter_chain = p_filter_chain;
}

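/* Allocate a filter block for a qdisc/class and create its default chain 0,
 * which always has to be present.
 */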
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	/* Create chain 0 by default; it always has to be present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get);

static void tcf_block_put_final(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain, *tmp;

	/* At this point, all the chains should have refcnt == 1. */
	rtnl_lock();
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	rtnl_unlock();
	kfree(block);
}

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing. However, filters are
 * destroyed in RCU callbacks, so we have to hold the chains first; otherwise
 * we would always race with the RCU callbacks on this list without proper
 * locking.
 */
static void tcf_block_put_deferred(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain;

	rtnl_lock();
	/* Hold a refcnt for all chains except chain 0, in case they are gone. */
	list_for_each_entry(chain, &block->chain_list, list)
		if (chain->index)
			tcf_chain_hold(chain);

	/* No race on the list, because no chain could be destroyed. */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);

	INIT_WORK(&block->work, tcf_block_put_final);
	/* Wait for RCU callbacks to release the reference count and make
	 * sure their works have been queued before this.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
	rtnl_unlock();
}

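/* Release a filter block. Because the RTNL lock is held here and the filters
 * are freed by RCU callbacks that queue work themselves, teardown is split
 * into two deferred steps: tcf_block_put_deferred() flushes the chains and
 * tcf_block_put_final() drops the chain references and frees the block.
 */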
void tcf_block_put(struct tcf_block *block)
{
	if (!block)
		return;

	INIT_WORK(&block->work, tcf_block_put_deferred);
	/* Wait for existing RCU callbacks to finish and make sure their works
	 * have been queued before this. We cannot flush pending works here
	 * because we are holding the RTNL lock.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put);

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

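/* tcf_chain_info records where in a chain a proto-tcf was found (or where a
 * new one should be inserted): pprev points at the link to update and next at
 * the following entry.
 */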
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (chain->p_filter_chain &&
	    *chain_info->pprev == chain->filter_chain)
		rcu_assign_pointer(*chain->p_filter_chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (chain->p_filter_chain && tp == chain->filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}

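/* Fill a netlink message describing a single filter (or only the proto-tcf
 * itself when fh is NULL), including its kind, chain index and the
 * classifier-specific dump.
 */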
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, void *fh, u32 portid,
			 u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, 0, event, false);
}

/* Add/change/delete/get a filter node */

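/* This single handler implements RTM_NEWTFILTER, RTM_DELTFILTER and
 * RTM_GETTFILTER. It locates the qdisc/class, its filter block and the chain,
 * creates the proto-tcf on demand and then dispatches to the classifier's
 * change/delete/get operations. A return value of -EAGAIN replays the whole
 * request, e.g. after a classifier module has just been loaded.
 */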
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find the head of the filter chain. */

	/* Find the link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find the qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* Finally, grab the filter block of the qdisc or class. */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create a new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, fh, false,
						 &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

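/* Dump all filters on one chain, honouring the prio/protocol filter from the
 * request and the resume state kept in cb->args[]. Returns false when the
 * dump ran out of skb space and should be continued later.
 */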
static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

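/* Parse the action and/or police attributes of a filter change request into
 * a tcf_exts container. The old "police" attribute is kept for backward
 * compatibility and is marked with the TCA_OLD_COMPAT type.
 */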
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * Again for backward-compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

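/* Walk the filter's actions looking for one that can report an underlying
 * net_device (via ->get_dev) and return it through hw_dev; used when
 * offloading filters to hardware.
 */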
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (a->ops->get_dev) {
			a->ops->get_dev(a, dev_net(dev), hw_dev);
			break;
		}
	}
	if (*hw_dev)
		return 0;
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);

static int __init tc_filter_init(void)
{
	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);