// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

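/* Each tcf_proto that is being destroyed is tracked in a per-block hash
 * table keyed by (chain index, prio, protocol), so concurrent users can
 * detect classifiers that are going away. The helpers below add, look up
 * and remove entries in that table under proto_destroy_lock.
 */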
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

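/* Allocate a new tcf_proto instance, look up (and, if needed, module-load)
 * its classifier ops by kind, and run the classifier's ->init() callback.
 * Returns the new proto with a single reference held, or an ERR_PTR.
 */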
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

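/* Ask the classifier whether it is empty and may therefore be deleted.
 * Classifiers that implement ->delete_empty() answer directly; for the
 * others the proto is unconditionally marked as deleting.
 */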
static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

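/* Look up a chain by index under block->lock, optionally creating it, and
 * take a reference. The RTM_NEWCHAIN notification is only sent when the
 * first non-action reference is taken, so chains that exist solely because
 * actions point at them stay invisible to the user.
 */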
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

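/* Drop a (possibly action-held) reference to a chain. The RTM_DELCHAIN
 * notification and the template destruction must run outside block->lock,
 * which is why the template pointers are copied to local variables before
 * the lock is released. Dropping the last reference detaches the chain and
 * may also free the block.
 */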
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

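/* Detach every classifier from the chain under filter_chain_lock, mark each
 * of them as being destroyed, and then release the references outside the
 * lock so the classifiers' destroy callbacks run without it held.
 */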
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

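/* Resolve the clsact/ingress tcf_block attached to a device for the given
 * direction, or return NULL if the device has no suitable qdisc. The plain
 * "ingress" qdisc has no egress block.
 */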
static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0) {
		if (err != -EOPNOTSUPP)
			NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
		return err;
	}

	return tcf_block_setup(block, &bo);
}

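/* Bind a block to a device for hardware offload. Devices without
 * ndo_setup_tc (or that return -EOPNOTSUPP) are accounted in
 * nooffloaddevcnt instead, which is only allowed while the block has no
 * offloaded filters.
 */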
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

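/* Register a callback that is invoked whenever the head classifier of
 * chain 0 changes. If chain 0 already exists, the callback is first
 * replayed with the current head under chain0->filter_chain_lock before
 * the item is linked into the block's list.
 */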
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

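/* Blocks are indexed per network namespace in an IDR so that they can be
 * looked up by block index.
 */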
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 962 | static struct tcf_chain * |
| 963 | __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) |
| 964 | { |
| 965 | mutex_lock(&block->lock); |
| 966 | if (chain) |
| 967 | chain = list_is_last(&chain->list, &block->chain_list) ? |
| 968 | NULL : list_next_entry(chain, list); |
| 969 | else |
| 970 | chain = list_first_entry_or_null(&block->chain_list, |
| 971 | struct tcf_chain, list); |
| 972 | |
| 973 | /* skip all action-only chains */ |
| 974 | while (chain && tcf_chain_held_by_acts_only(chain)) |
| 975 | chain = list_is_last(&chain->list, &block->chain_list) ? |
| 976 | NULL : list_next_entry(chain, list); |
| 977 | |
| 978 | if (chain) |
| 979 | tcf_chain_hold(chain); |
| 980 | mutex_unlock(&block->lock); |
| 981 | |
| 982 | return chain; |
| 983 | } |
| 984 | |
| 985 | /* Function to be used by all clients that want to iterate over all chains on |
| 986 | * a block. It properly obtains block->lock and takes a reference to the chain |
| 987 | * before returning it. Users of this function must be tolerant to concurrent |
| 988 | * chain insertion/deletion or ensure that no concurrent chain modification is |
| 989 | * possible. Note that netlink dump callbacks cannot guarantee a consistent |
| 990 | * dump because the rtnl lock is released each time the skb is filled with |
| 991 | * data and sent to user-space. |
| 992 | */ |
| 993 | |
| 994 | struct tcf_chain * |
| 995 | tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) |
| 996 | { |
| 997 | struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); |
| 998 | |
| 999 | if (chain) |
| 1000 | tcf_chain_put(chain); |
| 1001 | |
| 1002 | return chain_next; |
| 1003 | } |
| 1004 | EXPORT_SYMBOL(tcf_get_next_chain); |
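/* Usage sketch (illustrative only): a typical caller walks every chain on a
 * block with the loop below. Each iteration drops the reference to the
 * previous chain and takes one on the next, so no explicit put is needed when
 * the loop runs to completion.
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		... inspect or dump the chain ...
 *	}
 */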
| 1005 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1006 | static struct tcf_proto * |
| 1007 | __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) |
| 1008 | { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1009 | u32 prio = 0; |
| 1010 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1011 | ASSERT_RTNL(); |
| 1012 | mutex_lock(&chain->filter_chain_lock); |
| 1013 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1014 | if (!tp) { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1015 | tp = tcf_chain_dereference(chain->filter_chain, chain); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1016 | } else if (tcf_proto_is_deleting(tp)) { |
| 1017 | /* 'deleting' flag is set and chain->filter_chain_lock was |
| 1018 | * unlocked, which means next pointer could be invalid. Restart |
| 1019 | * search. |
| 1020 | */ |
| 1021 | prio = tp->prio + 1; |
| 1022 | tp = tcf_chain_dereference(chain->filter_chain, chain); |
| 1023 | |
| 1024 | for (; tp; tp = tcf_chain_dereference(tp->next, chain)) |
| 1025 | if (!tp->deleting && tp->prio >= prio) |
| 1026 | break; |
| 1027 | } else { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1028 | tp = tcf_chain_dereference(tp->next, chain); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1029 | } |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1030 | |
| 1031 | if (tp) |
| 1032 | tcf_proto_get(tp); |
| 1033 | |
| 1034 | mutex_unlock(&chain->filter_chain_lock); |
| 1035 | |
| 1036 | return tp; |
| 1037 | } |
| 1038 | |
| 1039 | /* Function to be used by all clients that want to iterate over all tp's on |
| 1040 | * a chain. Users of this function must be tolerant to concurrent tp |
| 1041 | * insertion/deletion or ensure that no concurrent chain modification is |
| 1042 | * possible. Note that netlink dump callbacks cannot guarantee a consistent |
| 1043 | * dump because the rtnl lock is released each time the skb is filled with |
| 1044 | * data and sent to user-space. |
| 1045 | */ |
| 1046 | |
| 1047 | struct tcf_proto * |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1048 | tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp, |
| 1049 | bool rtnl_held) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1050 | { |
| 1051 | struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp); |
| 1052 | |
| 1053 | if (tp) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1054 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1055 | |
| 1056 | return tp_next; |
| 1057 | } |
| 1058 | EXPORT_SYMBOL(tcf_get_next_proto); |
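/* Usage sketch (illustrative only): iterating over all classifiers on a chain
 * follows the same pattern; rtnl_held reflects whether the caller currently
 * holds the rtnl lock.
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp, rtnl_held)) {
 *		... inspect or dump the classifier ...
 *	}
 */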
| 1059 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1060 | static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1061 | { |
| 1062 | struct tcf_chain *chain; |
| 1063 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1064 | /* Last reference to block. At this point chains cannot be added or |
| 1065 | * removed concurrently. |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1066 | */ |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1067 | for (chain = tcf_get_next_chain(block, NULL); |
| 1068 | chain; |
| 1069 | chain = tcf_get_next_chain(block, chain)) { |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1070 | tcf_chain_put_explicitly_created(chain); |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1071 | tcf_chain_flush(chain, rtnl_held); |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1072 | } |
| 1073 | } |
| 1074 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1075 | /* Look up Qdisc and increment its reference counter. |
| 1076 | * Set parent, if necessary. |
| 1077 | */ |
| 1078 | |
| 1079 | static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, |
| 1080 | u32 *parent, int ifindex, bool rtnl_held, |
| 1081 | struct netlink_ext_ack *extack) |
| 1082 | { |
| 1083 | const struct Qdisc_class_ops *cops; |
| 1084 | struct net_device *dev; |
| 1085 | int err = 0; |
| 1086 | |
| 1087 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
| 1088 | return 0; |
| 1089 | |
| 1090 | rcu_read_lock(); |
| 1091 | |
| 1092 | /* Find link */ |
| 1093 | dev = dev_get_by_index_rcu(net, ifindex); |
| 1094 | if (!dev) { |
| 1095 | rcu_read_unlock(); |
| 1096 | return -ENODEV; |
| 1097 | } |
| 1098 | |
| 1099 | /* Find qdisc */ |
| 1100 | if (!*parent) { |
| 1101 | *q = dev->qdisc; |
| 1102 | *parent = (*q)->handle; |
| 1103 | } else { |
| 1104 | *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); |
| 1105 | if (!*q) { |
| 1106 | NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist"); |
| 1107 | err = -EINVAL; |
| 1108 | goto errout_rcu; |
| 1109 | } |
| 1110 | } |
| 1111 | |
| 1112 | *q = qdisc_refcount_inc_nz(*q); |
| 1113 | if (!*q) { |
| 1114 | NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist"); |
| 1115 | err = -EINVAL; |
| 1116 | goto errout_rcu; |
| 1117 | } |
| 1118 | |
| 1119 | /* Is it classful? */ |
| 1120 | cops = (*q)->ops->cl_ops; |
| 1121 | if (!cops) { |
| 1122 | NL_SET_ERR_MSG(extack, "Qdisc not classful"); |
| 1123 | err = -EINVAL; |
| 1124 | goto errout_qdisc; |
| 1125 | } |
| 1126 | |
| 1127 | if (!cops->tcf_block) { |
| 1128 | NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); |
| 1129 | err = -EOPNOTSUPP; |
| 1130 | goto errout_qdisc; |
| 1131 | } |
| 1132 | |
| 1133 | errout_rcu: |
| 1134 | /* At this point we know that qdisc is not noop_qdisc, |
| 1135 | * which means that qdisc holds a reference to net_device |
| 1136 | * and we hold a reference to qdisc, so it is safe to release |
| 1137 | * rcu read lock. |
| 1138 | */ |
| 1139 | rcu_read_unlock(); |
| 1140 | return err; |
| 1141 | |
| 1142 | errout_qdisc: |
| 1143 | rcu_read_unlock(); |
| 1144 | |
| 1145 | if (rtnl_held) |
| 1146 | qdisc_put(*q); |
| 1147 | else |
| 1148 | qdisc_put_unlocked(*q); |
| 1149 | *q = NULL; |
| 1150 | |
| 1151 | return err; |
| 1152 | } |
| 1153 | |
| 1154 | static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl, |
| 1155 | int ifindex, struct netlink_ext_ack *extack) |
| 1156 | { |
| 1157 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
| 1158 | return 0; |
| 1159 | |
| 1160 | /* Are we searching for a filter attached to a class? */ |
| 1161 | if (TC_H_MIN(parent)) { |
| 1162 | const struct Qdisc_class_ops *cops = q->ops->cl_ops; |
| 1163 | |
| 1164 | *cl = cops->find(q, parent); |
| 1165 | if (*cl == 0) { |
| 1166 | NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); |
| 1167 | return -ENOENT; |
| 1168 | } |
| 1169 | } |
| 1170 | |
| 1171 | return 0; |
| 1172 | } |
| 1173 | |
| 1174 | static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q, |
| 1175 | unsigned long cl, int ifindex, |
| 1176 | u32 block_index, |
| 1177 | struct netlink_ext_ack *extack) |
| 1178 | { |
| 1179 | struct tcf_block *block; |
| 1180 | |
| 1181 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
| 1182 | block = tcf_block_refcnt_get(net, block_index); |
| 1183 | if (!block) { |
| 1184 | NL_SET_ERR_MSG(extack, "Block of given index was not found"); |
| 1185 | return ERR_PTR(-EINVAL); |
| 1186 | } |
| 1187 | } else { |
| 1188 | const struct Qdisc_class_ops *cops = q->ops->cl_ops; |
| 1189 | |
| 1190 | block = cops->tcf_block(q, cl, extack); |
| 1191 | if (!block) |
| 1192 | return ERR_PTR(-EINVAL); |
| 1193 | |
| 1194 | if (tcf_block_shared(block)) { |
| 1195 | NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); |
| 1196 | return ERR_PTR(-EOPNOTSUPP); |
| 1197 | } |
| 1198 | |
| 1199 | /* Always take a reference to the block in order to support |
| 1200 | * executing the cls API rules update path without the rtnl lock. |
| 1201 | * The caller must release the block when it is finished using it. |
| 1202 | * The 'if' branch of this conditional obtains its reference to the |
| 1203 | * block by calling tcf_block_refcnt_get(). |
| 1204 | */ |
| 1205 | refcount_inc(&block->refcnt); |
| 1206 | } |
| 1207 | |
| 1208 | return block; |
| 1209 | } |
| 1210 | |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1211 | static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1212 | struct tcf_block_ext_info *ei, bool rtnl_held) |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1213 | { |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1214 | if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1215 | /* Flushing/putting all chains will cause the block to be |
| 1216 | * deallocated when last chain is freed. However, if chain_list |
| 1217 | * is empty, block has to be manually deallocated. After block |
| 1218 | * reference counter reached 0, it is no longer possible to |
| 1219 | * increment it or add new chains to block. |
| 1220 | */ |
| 1221 | bool free_block = list_empty(&block->chain_list); |
| 1222 | |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1223 | mutex_unlock(&block->lock); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1224 | if (tcf_block_shared(block)) |
| 1225 | tcf_block_remove(block, block->net); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1226 | |
| 1227 | if (q) |
| 1228 | tcf_block_offload_unbind(block, q, ei); |
| 1229 | |
| 1230 | if (free_block) |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1231 | tcf_block_destroy(block); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1232 | else |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1233 | tcf_block_flush_all_chains(block, rtnl_held); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1234 | } else if (q) { |
| 1235 | tcf_block_offload_unbind(block, q, ei); |
| 1236 | } |
| 1237 | } |
| 1238 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1239 | static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1240 | { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1241 | __tcf_block_put(block, NULL, NULL, rtnl_held); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1242 | } |
| 1243 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1244 | /* Find tcf block. |
| 1245 | * Set q, parent, cl when appropriate. |
| 1246 | */ |
| 1247 | |
| 1248 | static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, |
| 1249 | u32 *parent, unsigned long *cl, |
| 1250 | int ifindex, u32 block_index, |
| 1251 | struct netlink_ext_ack *extack) |
| 1252 | { |
| 1253 | struct tcf_block *block; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1254 | int err = 0; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1255 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1256 | ASSERT_RTNL(); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1257 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1258 | err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); |
| 1259 | if (err) |
| 1260 | goto errout; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1261 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1262 | err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); |
| 1263 | if (err) |
| 1264 | goto errout_qdisc; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1265 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1266 | block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); |
Dan Carpenter | af736bf | 2019-02-18 12:26:32 +0300 | [diff] [blame] | 1267 | if (IS_ERR(block)) { |
| 1268 | err = PTR_ERR(block); |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1269 | goto errout_qdisc; |
Dan Carpenter | af736bf | 2019-02-18 12:26:32 +0300 | [diff] [blame] | 1270 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1271 | |
| 1272 | return block; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1273 | |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1274 | errout_qdisc: |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1275 | if (*q) |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1276 | qdisc_put(*q); |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1277 | errout: |
| 1278 | *q = NULL; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1279 | return ERR_PTR(err); |
| 1280 | } |
| 1281 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1282 | static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, |
| 1283 | bool rtnl_held) |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1284 | { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1285 | if (!IS_ERR_OR_NULL(block)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1286 | tcf_block_refcnt_put(block, rtnl_held); |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1287 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 1288 | if (q) { |
| 1289 | if (rtnl_held) |
| 1290 | qdisc_put(q); |
| 1291 | else |
| 1292 | qdisc_put_unlocked(q); |
| 1293 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1294 | } |
| 1295 | |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1296 | struct tcf_block_owner_item { |
| 1297 | struct list_head list; |
| 1298 | struct Qdisc *q; |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1299 | enum flow_block_binder_type binder_type; |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1300 | }; |
| 1301 | |
| 1302 | static void |
| 1303 | tcf_block_owner_netif_keep_dst(struct tcf_block *block, |
| 1304 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1305 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1306 | { |
| 1307 | if (block->keep_dst && |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1308 | binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && |
| 1309 | binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1310 | netif_keep_dst(qdisc_dev(q)); |
| 1311 | } |
| 1312 | |
| 1313 | void tcf_block_netif_keep_dst(struct tcf_block *block) |
| 1314 | { |
| 1315 | struct tcf_block_owner_item *item; |
| 1316 | |
| 1317 | block->keep_dst = true; |
| 1318 | list_for_each_entry(item, &block->owner_list, list) |
| 1319 | tcf_block_owner_netif_keep_dst(block, item->q, |
| 1320 | item->binder_type); |
| 1321 | } |
| 1322 | EXPORT_SYMBOL(tcf_block_netif_keep_dst); |
| 1323 | |
| 1324 | static int tcf_block_owner_add(struct tcf_block *block, |
| 1325 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1326 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1327 | { |
| 1328 | struct tcf_block_owner_item *item; |
| 1329 | |
| 1330 | item = kmalloc(sizeof(*item), GFP_KERNEL); |
| 1331 | if (!item) |
| 1332 | return -ENOMEM; |
| 1333 | item->q = q; |
| 1334 | item->binder_type = binder_type; |
| 1335 | list_add(&item->list, &block->owner_list); |
| 1336 | return 0; |
| 1337 | } |
| 1338 | |
| 1339 | static void tcf_block_owner_del(struct tcf_block *block, |
| 1340 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1341 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1342 | { |
| 1343 | struct tcf_block_owner_item *item; |
| 1344 | |
| 1345 | list_for_each_entry(item, &block->owner_list, list) { |
| 1346 | if (item->q == q && item->binder_type == binder_type) { |
| 1347 | list_del(&item->list); |
| 1348 | kfree(item); |
| 1349 | return; |
| 1350 | } |
| 1351 | } |
| 1352 | WARN_ON(1); |
| 1353 | } |
| 1354 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1355 | int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, |
| 1356 | struct tcf_block_ext_info *ei, |
| 1357 | struct netlink_ext_ack *extack) |
| 1358 | { |
| 1359 | struct net *net = qdisc_net(q); |
| 1360 | struct tcf_block *block = NULL; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1361 | int err; |
| 1362 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1363 | if (ei->block_index) |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1364 | /* block_index not 0 means the shared block is requested */ |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1365 | block = tcf_block_refcnt_get(net, ei->block_index); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1366 | |
| 1367 | if (!block) { |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 1368 | block = tcf_block_create(net, q, ei->block_index, extack); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1369 | if (IS_ERR(block)) |
| 1370 | return PTR_ERR(block); |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 1371 | if (tcf_block_shared(block)) { |
| 1372 | err = tcf_block_insert(block, net, extack); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1373 | if (err) |
| 1374 | goto err_block_insert; |
| 1375 | } |
| 1376 | } |
| 1377 | |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1378 | err = tcf_block_owner_add(block, q, ei->binder_type); |
| 1379 | if (err) |
| 1380 | goto err_block_owner_add; |
| 1381 | |
| 1382 | tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); |
| 1383 | |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1384 | err = tcf_chain0_head_change_cb_add(block, ei, extack); |
Jiri Pirko | a9b1944 | 2018-01-17 11:46:45 +0100 | [diff] [blame] | 1385 | if (err) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1386 | goto err_chain0_head_change_cb_add; |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1387 | |
John Hurley | 60513bd | 2018-06-25 14:30:04 -0700 | [diff] [blame] | 1388 | err = tcf_block_offload_bind(block, q, ei, extack); |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1389 | if (err) |
| 1390 | goto err_block_offload_bind; |
| 1391 | |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1392 | *p_block = block; |
| 1393 | return 0; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1394 | |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1395 | err_block_offload_bind: |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1396 | tcf_chain0_head_change_cb_del(block, ei); |
| 1397 | err_chain0_head_change_cb_add: |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1398 | tcf_block_owner_del(block, q, ei->binder_type); |
| 1399 | err_block_owner_add: |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1400 | err_block_insert: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1401 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1402 | return err; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1403 | } |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1404 | EXPORT_SYMBOL(tcf_block_get_ext); |
| 1405 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1406 | static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) |
| 1407 | { |
| 1408 | struct tcf_proto __rcu **p_filter_chain = priv; |
| 1409 | |
| 1410 | rcu_assign_pointer(*p_filter_chain, tp_head); |
| 1411 | } |
| 1412 | |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1413 | int tcf_block_get(struct tcf_block **p_block, |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 1414 | struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, |
| 1415 | struct netlink_ext_ack *extack) |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1416 | { |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1417 | struct tcf_block_ext_info ei = { |
| 1418 | .chain_head_change = tcf_chain_head_change_dflt, |
| 1419 | .chain_head_change_priv = p_filter_chain, |
| 1420 | }; |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1421 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1422 | WARN_ON(!p_filter_chain); |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 1423 | return tcf_block_get_ext(p_block, q, &ei, extack); |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1424 | } |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1425 | EXPORT_SYMBOL(tcf_block_get); |
| 1426 | |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1427 | /* XXX: Standalone actions are not allowed to jump to any chain, and bound |
Roman Kapl | a60b3f5 | 2017-11-24 12:27:58 +0100 | [diff] [blame] | 1428 | * actions should all be removed after flushing. |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1429 | */ |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1430 | void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, |
David S. Miller | e1ea2f9 | 2017-10-30 14:10:01 +0900 | [diff] [blame] | 1431 | struct tcf_block_ext_info *ei) |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1432 | { |
David S. Miller | c30abd5 | 2017-12-16 22:11:55 -0500 | [diff] [blame] | 1433 | if (!block) |
| 1434 | return; |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1435 | tcf_chain0_head_change_cb_del(block, ei); |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1436 | tcf_block_owner_del(block, q, ei->binder_type); |
Roman Kapl | a60b3f5 | 2017-11-24 12:27:58 +0100 | [diff] [blame] | 1437 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1438 | __tcf_block_put(block, q, ei, true); |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1439 | } |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1440 | EXPORT_SYMBOL(tcf_block_put_ext); |
| 1441 | |
| 1442 | void tcf_block_put(struct tcf_block *block) |
| 1443 | { |
| 1444 | struct tcf_block_ext_info ei = {0, }; |
| 1445 | |
Jiri Pirko | 4853f12 | 2017-12-21 13:13:59 +0100 | [diff] [blame] | 1446 | if (!block) |
| 1447 | return; |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1448 | tcf_block_put_ext(block, block->q, &ei); |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1449 | } |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1451 | EXPORT_SYMBOL(tcf_block_put); |
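/* Usage sketch (illustrative only): a simple classful qdisc typically pairs
 * tcf_block_get() in its ->init() with tcf_block_put() in its ->destroy().
 * The foo_* names and the block/filter_list members of the hypothetical
 * private data below are assumptions made for the example.
 *
 *	struct foo_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */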
Jiri Pirko | cf1facd | 2017-02-09 14:38:56 +0100 | [diff] [blame] | 1452 | |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1453 | static int |
Pablo Neira Ayuso | a732331 | 2019-07-19 18:20:15 +0200 | [diff] [blame] | 1454 | tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1455 | void *cb_priv, bool add, bool offload_in_use, |
| 1456 | struct netlink_ext_ack *extack) |
| 1457 | { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1458 | struct tcf_chain *chain, *chain_prev; |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1459 | struct tcf_proto *tp, *tp_prev; |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1460 | int err; |
| 1461 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1462 | lockdep_assert_held(&block->cb_lock); |
| 1463 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1464 | for (chain = __tcf_get_next_chain(block, NULL); |
| 1465 | chain; |
| 1466 | chain_prev = chain, |
| 1467 | chain = __tcf_get_next_chain(block, chain), |
| 1468 | tcf_chain_put(chain_prev)) { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1469 | for (tp = __tcf_get_next_proto(chain, NULL); tp; |
| 1470 | tp_prev = tp, |
| 1471 | tp = __tcf_get_next_proto(chain, tp), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1472 | tcf_proto_put(tp_prev, true, NULL)) { |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1473 | if (tp->ops->reoffload) { |
| 1474 | err = tp->ops->reoffload(tp, add, cb, cb_priv, |
| 1475 | extack); |
| 1476 | if (err && add) |
| 1477 | goto err_playback_remove; |
| 1478 | } else if (add && offload_in_use) { |
| 1479 | err = -EOPNOTSUPP; |
| 1480 | NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); |
| 1481 | goto err_playback_remove; |
| 1482 | } |
| 1483 | } |
| 1484 | } |
| 1485 | |
| 1486 | return 0; |
| 1487 | |
| 1488 | err_playback_remove: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1489 | tcf_proto_put(tp, true, NULL); |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1490 | tcf_chain_put(chain); |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1491 | tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, |
| 1492 | extack); |
| 1493 | return err; |
| 1494 | } |
| 1495 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1496 | static int tcf_block_bind(struct tcf_block *block, |
| 1497 | struct flow_block_offload *bo) |
| 1498 | { |
| 1499 | struct flow_block_cb *block_cb, *next; |
| 1500 | int err, i = 0; |
| 1501 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1502 | lockdep_assert_held(&block->cb_lock); |
| 1503 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1504 | list_for_each_entry(block_cb, &bo->cb_list, list) { |
| 1505 | err = tcf_block_playback_offloads(block, block_cb->cb, |
| 1506 | block_cb->cb_priv, true, |
| 1507 | tcf_block_offload_in_use(block), |
| 1508 | bo->extack); |
| 1509 | if (err) |
| 1510 | goto err_unroll; |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame] | 1511 | if (!bo->unlocked_driver_cb) |
| 1512 | block->lockeddevcnt++; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1513 | |
| 1514 | i++; |
| 1515 | } |
Pablo Neira Ayuso | 14bfb13 | 2019-07-19 18:20:16 +0200 | [diff] [blame] | 1516 | list_splice(&bo->cb_list, &block->flow_block.cb_list); |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1517 | |
| 1518 | return 0; |
| 1519 | |
| 1520 | err_unroll: |
| 1521 | list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { |
| 1522 | if (i-- > 0) { |
| 1523 | list_del(&block_cb->list); |
| 1524 | tcf_block_playback_offloads(block, block_cb->cb, |
| 1525 | block_cb->cb_priv, false, |
| 1526 | tcf_block_offload_in_use(block), |
| 1527 | NULL); |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame] | 1528 | if (!bo->unlocked_driver_cb) |
| 1529 | block->lockeddevcnt--; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1530 | } |
| 1531 | flow_block_cb_free(block_cb); |
| 1532 | } |
| 1533 | |
| 1534 | return err; |
| 1535 | } |
| 1536 | |
| 1537 | static void tcf_block_unbind(struct tcf_block *block, |
| 1538 | struct flow_block_offload *bo) |
| 1539 | { |
| 1540 | struct flow_block_cb *block_cb, *next; |
| 1541 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1542 | lockdep_assert_held(&block->cb_lock); |
| 1543 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1544 | list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { |
| 1545 | tcf_block_playback_offloads(block, block_cb->cb, |
| 1546 | block_cb->cb_priv, false, |
| 1547 | tcf_block_offload_in_use(block), |
| 1548 | NULL); |
| 1549 | list_del(&block_cb->list); |
| 1550 | flow_block_cb_free(block_cb); |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame] | 1551 | if (!bo->unlocked_driver_cb) |
| 1552 | block->lockeddevcnt--; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1553 | } |
| 1554 | } |
| 1555 | |
| 1556 | static int tcf_block_setup(struct tcf_block *block, |
| 1557 | struct flow_block_offload *bo) |
| 1558 | { |
| 1559 | int err; |
| 1560 | |
| 1561 | switch (bo->command) { |
| 1562 | case FLOW_BLOCK_BIND: |
| 1563 | err = tcf_block_bind(block, bo); |
| 1564 | break; |
| 1565 | case FLOW_BLOCK_UNBIND: |
| 1566 | err = 0; |
| 1567 | tcf_block_unbind(block, bo); |
| 1568 | break; |
| 1569 | default: |
| 1570 | WARN_ON_ONCE(1); |
| 1571 | err = -EOPNOTSUPP; |
| 1572 | } |
| 1573 | |
| 1574 | return err; |
| 1575 | } |
| 1576 | |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1577 | /* Main classifier routine: scans the classifier chain attached |
| 1578 | * to this qdisc, (optionally) tests for the protocol and asks the |
| 1579 | * specific classifiers. |
| 1580 | */ |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1581 | static inline int __tcf_classify(struct sk_buff *skb, |
| 1582 | const struct tcf_proto *tp, |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1583 | const struct tcf_proto *orig_tp, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1584 | struct tcf_result *res, |
| 1585 | bool compat_mode, |
| 1586 | u32 *last_executed_chain) |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1587 | { |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1588 | #ifdef CONFIG_NET_CLS_ACT |
| 1589 | const int max_reclassify_loop = 4; |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1590 | const struct tcf_proto *first_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1591 | int limit = 0; |
| 1592 | |
| 1593 | reclassify: |
| 1594 | #endif |
| 1595 | for (; tp; tp = rcu_dereference_bh(tp->next)) { |
Cong Wang | cd0c4e7 | 2019-01-11 18:55:42 -0800 | [diff] [blame] | 1596 | __be16 protocol = tc_skb_protocol(skb); |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1597 | int err; |
| 1598 | |
| 1599 | if (tp->protocol != protocol && |
| 1600 | tp->protocol != htons(ETH_P_ALL)) |
| 1601 | continue; |
| 1602 | |
| 1603 | err = tp->classify(skb, tp, res); |
| 1604 | #ifdef CONFIG_NET_CLS_ACT |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1605 | if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1606 | first_tp = orig_tp; |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1607 | *last_executed_chain = first_tp->chain->index; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1608 | goto reset; |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1609 | } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1610 | first_tp = res->goto_tp; |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1611 | *last_executed_chain = err & TC_ACT_EXT_VAL_MASK; |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1612 | goto reset; |
| 1613 | } |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1614 | #endif |
| 1615 | if (err >= 0) |
| 1616 | return err; |
| 1617 | } |
| 1618 | |
| 1619 | return TC_ACT_UNSPEC; /* signal: continue lookup */ |
| 1620 | #ifdef CONFIG_NET_CLS_ACT |
| 1621 | reset: |
| 1622 | if (unlikely(limit++ >= max_reclassify_loop)) { |
Jiri Pirko | 9d3aaff | 2018-01-17 11:46:47 +0100 | [diff] [blame] | 1623 | net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n", |
| 1624 | tp->chain->block->index, |
| 1625 | tp->prio & 0xffff, |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1626 | ntohs(tp->protocol)); |
| 1627 | return TC_ACT_SHOT; |
| 1628 | } |
| 1629 | |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1630 | tp = first_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1631 | goto reclassify; |
| 1632 | #endif |
| 1633 | } |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1634 | |
| 1635 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
| 1636 | struct tcf_result *res, bool compat_mode) |
| 1637 | { |
| 1638 | u32 last_executed_chain = 0; |
| 1639 | |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1640 | return __tcf_classify(skb, tp, tp, res, compat_mode, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1641 | &last_executed_chain); |
| 1642 | } |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1643 | EXPORT_SYMBOL(tcf_classify); |
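/* Usage sketch (illustrative only): a qdisc's enqueue path usually classifies
 * a packet roughly as shown below; q->filter_list is a hypothetical per-qdisc
 * filter chain head and the result handling is simplified.
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl;
 *	int err;
 *
 *	fl = rcu_dereference_bh(q->filter_list);
 *	err = tcf_classify(skb, fl, &res, false);
 *	switch (err) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		return NULL;	... drop or treat as consumed ...
 *	}
 *	... otherwise use res.classid to pick a class or band ...
 */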
| 1644 | |
Paul Blakey | 7d17c54 | 2020-02-16 12:01:22 +0200 | [diff] [blame] | 1645 | int tcf_classify_ingress(struct sk_buff *skb, |
| 1646 | const struct tcf_block *ingress_block, |
| 1647 | const struct tcf_proto *tp, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1648 | struct tcf_result *res, bool compat_mode) |
| 1649 | { |
| 1650 | #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
| 1651 | u32 last_executed_chain = 0; |
| 1652 | |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1653 | return __tcf_classify(skb, tp, tp, res, compat_mode, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1654 | &last_executed_chain); |
| 1655 | #else |
| 1656 | u32 last_executed_chain = tp ? tp->chain->index : 0; |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1657 | const struct tcf_proto *orig_tp = tp; |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1658 | struct tc_skb_ext *ext; |
| 1659 | int ret; |
| 1660 | |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1661 | ext = skb_ext_find(skb, TC_SKB_EXT); |
| 1662 | |
| 1663 | if (ext && ext->chain) { |
| 1664 | struct tcf_chain *fchain; |
| 1665 | |
| 1666 | fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain); |
| 1667 | if (!fchain) |
| 1668 | return TC_ACT_SHOT; |
| 1669 | |
| 1670 | /* Consume, so cloned/redirect skbs won't inherit ext */ |
| 1671 | skb_ext_del(skb, TC_SKB_EXT); |
| 1672 | |
| 1673 | tp = rcu_dereference_bh(fchain->filter_chain); |
Paul Blakey | a080da6 | 2020-04-06 18:36:56 +0300 | [diff] [blame] | 1674 | last_executed_chain = fchain->index; |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1675 | } |
| 1676 | |
| 1677 | ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, |
| 1678 | &last_executed_chain); |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1679 | |
| 1680 | /* If we missed on some chain */ |
| 1681 | if (ret == TC_ACT_UNSPEC && last_executed_chain) { |
| 1682 | ext = skb_ext_add(skb, TC_SKB_EXT); |
| 1683 | if (WARN_ON_ONCE(!ext)) |
| 1684 | return TC_ACT_SHOT; |
| 1685 | ext->chain = last_executed_chain; |
| 1686 | } |
| 1687 | |
| 1688 | return ret; |
| 1689 | #endif |
| 1690 | } |
| 1691 | EXPORT_SYMBOL(tcf_classify_ingress); |
| 1692 | |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1693 | struct tcf_chain_info { |
| 1694 | struct tcf_proto __rcu **pprev; |
| 1695 | struct tcf_proto __rcu *next; |
| 1696 | }; |
| 1697 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1698 | static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain, |
| 1699 | struct tcf_chain_info *chain_info) |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1700 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1701 | return tcf_chain_dereference(*chain_info->pprev, chain); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1702 | } |
| 1703 | |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1704 | static int tcf_chain_tp_insert(struct tcf_chain *chain, |
| 1705 | struct tcf_chain_info *chain_info, |
| 1706 | struct tcf_proto *tp) |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1707 | { |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1708 | if (chain->flushing) |
| 1709 | return -EAGAIN; |
| 1710 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1711 | if (*chain_info->pprev == chain->filter_chain) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1712 | tcf_chain0_head_change(chain, tp); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1713 | tcf_proto_get(tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1714 | RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1715 | rcu_assign_pointer(*chain_info->pprev, tp); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1716 | |
| 1717 | return 0; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1718 | } |
| 1719 | |
| 1720 | static void tcf_chain_tp_remove(struct tcf_chain *chain, |
| 1721 | struct tcf_chain_info *chain_info, |
| 1722 | struct tcf_proto *tp) |
| 1723 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1724 | struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1725 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1726 | tcf_proto_mark_delete(tp); |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1727 | if (tp == chain->filter_chain) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1728 | tcf_chain0_head_change(chain, next); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1729 | RCU_INIT_POINTER(*chain_info->pprev, next); |
| 1730 | } |
| 1731 | |
| 1732 | static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, |
| 1733 | struct tcf_chain_info *chain_info, |
| 1734 | u32 protocol, u32 prio, |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1735 | bool prio_allocate); |
| 1736 | |
| 1737 | /* Try to insert a new proto. |
| 1738 | * If a proto with the specified priority already exists, free the new |
| 1739 | * proto and return the existing one. |
| 1740 | */ |
| 1741 | |
| 1742 | static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, |
| 1743 | struct tcf_proto *tp_new, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1744 | u32 protocol, u32 prio, |
| 1745 | bool rtnl_held) |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1746 | { |
| 1747 | struct tcf_chain_info chain_info; |
| 1748 | struct tcf_proto *tp; |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1749 | int err = 0; |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1750 | |
| 1751 | mutex_lock(&chain->filter_chain_lock); |
| 1752 | |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1753 | if (tcf_proto_exists_destroying(chain, tp_new)) { |
| 1754 | mutex_unlock(&chain->filter_chain_lock); |
| 1755 | tcf_proto_destroy(tp_new, rtnl_held, false, NULL); |
| 1756 | return ERR_PTR(-EAGAIN); |
| 1757 | } |
| 1758 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1759 | tp = tcf_chain_tp_find(chain, &chain_info, |
| 1760 | protocol, prio, false); |
| 1761 | if (!tp) |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1762 | err = tcf_chain_tp_insert(chain, &chain_info, tp_new); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1763 | mutex_unlock(&chain->filter_chain_lock); |
| 1764 | |
| 1765 | if (tp) { |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1766 | tcf_proto_destroy(tp_new, rtnl_held, false, NULL); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1767 | tp_new = tp; |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1768 | } else if (err) { |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1769 | tcf_proto_destroy(tp_new, rtnl_held, false, NULL); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1770 | tp_new = ERR_PTR(err); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1771 | } |
| 1772 | |
| 1773 | return tp_new; |
| 1774 | } |
| 1775 | |
| 1776 | static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1777 | struct tcf_proto *tp, bool rtnl_held, |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1778 | struct netlink_ext_ack *extack) |
| 1779 | { |
| 1780 | struct tcf_chain_info chain_info; |
| 1781 | struct tcf_proto *tp_iter; |
| 1782 | struct tcf_proto **pprev; |
| 1783 | struct tcf_proto *next; |
| 1784 | |
| 1785 | mutex_lock(&chain->filter_chain_lock); |
| 1786 | |
| 1787 | /* Atomically find and remove tp from chain. */ |
| 1788 | for (pprev = &chain->filter_chain; |
| 1789 | (tp_iter = tcf_chain_dereference(*pprev, chain)); |
| 1790 | pprev = &tp_iter->next) { |
| 1791 | if (tp_iter == tp) { |
| 1792 | chain_info.pprev = pprev; |
| 1793 | chain_info.next = tp_iter->next; |
| 1794 | WARN_ON(tp_iter->deleting); |
| 1795 | break; |
| 1796 | } |
| 1797 | } |
| 1798 | /* Verify that tp still exists and no new filters were inserted |
| 1799 | * concurrently. |
| 1800 | * Mark tp for deletion if it is empty. |
| 1801 | */ |
Davide Caratti | a5b72a0 | 2019-12-28 16:36:58 +0100 | [diff] [blame] | 1802 | if (!tp_iter || !tcf_proto_check_delete(tp)) { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1803 | mutex_unlock(&chain->filter_chain_lock); |
| 1804 | return; |
| 1805 | } |
| 1806 | |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1807 | tcf_proto_signal_destroying(chain, tp); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1808 | next = tcf_chain_dereference(chain_info.next, chain); |
| 1809 | if (tp == chain->filter_chain) |
| 1810 | tcf_chain0_head_change(chain, next); |
| 1811 | RCU_INIT_POINTER(*chain_info.pprev, next); |
| 1812 | mutex_unlock(&chain->filter_chain_lock); |
| 1813 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1814 | tcf_proto_put(tp, rtnl_held, extack); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1815 | } |
| 1816 | |
| 1817 | static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, |
| 1818 | struct tcf_chain_info *chain_info, |
| 1819 | u32 protocol, u32 prio, |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1820 | bool prio_allocate) |
| 1821 | { |
| 1822 | struct tcf_proto **pprev; |
| 1823 | struct tcf_proto *tp; |
| 1824 | |
| 1825 | /* Check the chain for an existing tcf proto with this priority */ |
| 1826 | for (pprev = &chain->filter_chain; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1827 | (tp = tcf_chain_dereference(*pprev, chain)); |
| 1828 | pprev = &tp->next) { |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1829 | if (tp->prio >= prio) { |
| 1830 | if (tp->prio == prio) { |
| 1831 | if (prio_allocate || |
| 1832 | (tp->protocol != protocol && protocol)) |
| 1833 | return ERR_PTR(-EINVAL); |
| 1834 | } else { |
| 1835 | tp = NULL; |
| 1836 | } |
| 1837 | break; |
| 1838 | } |
| 1839 | } |
| 1840 | chain_info->pprev = pprev; |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1841 | if (tp) { |
| 1842 | chain_info->next = tp->next; |
| 1843 | tcf_proto_get(tp); |
| 1844 | } else { |
| 1845 | chain_info->next = NULL; |
| 1846 | } |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1847 | return tp; |
| 1848 | } |
| 1849 | |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1850 | static int tcf_fill_node(struct net *net, struct sk_buff *skb, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1851 | struct tcf_proto *tp, struct tcf_block *block, |
| 1852 | struct Qdisc *q, u32 parent, void *fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1853 | u32 portid, u32 seq, u16 flags, int event, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 1854 | bool terse_dump, bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1855 | { |
| 1856 | struct tcmsg *tcm; |
| 1857 | struct nlmsghdr *nlh; |
| 1858 | unsigned char *b = skb_tail_pointer(skb); |
| 1859 | |
| 1860 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); |
| 1861 | if (!nlh) |
| 1862 | goto out_nlmsg_trim; |
| 1863 | tcm = nlmsg_data(nlh); |
| 1864 | tcm->tcm_family = AF_UNSPEC; |
| 1865 | tcm->tcm__pad1 = 0; |
| 1866 | tcm->tcm__pad2 = 0; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1867 | if (q) { |
| 1868 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
| 1869 | tcm->tcm_parent = parent; |
| 1870 | } else { |
| 1871 | tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; |
| 1872 | tcm->tcm_block_index = block->index; |
| 1873 | } |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1874 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); |
| 1875 | if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) |
| 1876 | goto nla_put_failure; |
| 1877 | if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) |
| 1878 | goto nla_put_failure; |
| 1879 | if (!fh) { |
| 1880 | tcm->tcm_handle = 0; |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 1881 | } else if (terse_dump) { |
| 1882 | if (tp->ops->terse_dump) { |
| 1883 | if (tp->ops->terse_dump(net, tp, fh, skb, tcm, |
| 1884 | rtnl_held) < 0) |
| 1885 | goto nla_put_failure; |
| 1886 | } else { |
| 1887 | goto cls_op_not_supp; |
| 1888 | } |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1889 | } else { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1890 | if (tp->ops->dump && |
| 1891 | tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1892 | goto nla_put_failure; |
| 1893 | } |
| 1894 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
| 1895 | return skb->len; |
| 1896 | |
| 1897 | out_nlmsg_trim: |
| 1898 | nla_put_failure: |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 1899 | cls_op_not_supp: |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1900 | nlmsg_trim(skb, b); |
| 1901 | return -1; |
| 1902 | } |
| 1903 | |
| 1904 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
| 1905 | struct nlmsghdr *n, struct tcf_proto *tp, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1906 | struct tcf_block *block, struct Qdisc *q, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1907 | u32 parent, void *fh, int event, bool unicast, |
| 1908 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1909 | { |
| 1910 | struct sk_buff *skb; |
| 1911 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1912 | int err = 0; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1913 | |
| 1914 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 1915 | if (!skb) |
| 1916 | return -ENOBUFS; |
| 1917 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1918 | if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1919 | n->nlmsg_seq, n->nlmsg_flags, event, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 1920 | false, rtnl_held) <= 0) { |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1921 | kfree_skb(skb); |
| 1922 | return -EINVAL; |
| 1923 | } |
| 1924 | |
| 1925 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1926 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 1927 | else |
| 1928 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 1929 | n->nlmsg_flags & NLM_F_ECHO); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1930 | |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1931 | if (err > 0) |
| 1932 | err = 0; |
| 1933 | return err; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1934 | } |
| 1935 | |
| 1936 | static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, |
| 1937 | struct nlmsghdr *n, struct tcf_proto *tp, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1938 | struct tcf_block *block, struct Qdisc *q, |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1939 | u32 parent, void *fh, bool unicast, bool *last, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1940 | bool rtnl_held, struct netlink_ext_ack *extack) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1941 | { |
| 1942 | struct sk_buff *skb; |
| 1943 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 1944 | int err; |
| 1945 | |
| 1946 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 1947 | if (!skb) |
| 1948 | return -ENOBUFS; |
| 1949 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1950 | if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1951 | n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 1952 | false, rtnl_held) <= 0) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1953 | NL_SET_ERR_MSG(extack, "Failed to build del event notification"); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1954 | kfree_skb(skb); |
| 1955 | return -EINVAL; |
| 1956 | } |
| 1957 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1958 | err = tp->ops->delete(tp, fh, last, rtnl_held, extack); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1959 | if (err) { |
| 1960 | kfree_skb(skb); |
| 1961 | return err; |
| 1962 | } |
| 1963 | |
| 1964 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1965 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 1966 | else |
| 1967 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 1968 | n->nlmsg_flags & NLM_F_ECHO); |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1969 | if (err < 0) |
| 1970 | NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1971 | |
| 1972 | if (err > 0) |
| 1973 | err = 0; |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1974 | return err; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1975 | } |
| 1976 | |
| 1977 | static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1978 | struct tcf_block *block, struct Qdisc *q, |
| 1979 | u32 parent, struct nlmsghdr *n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1980 | struct tcf_chain *chain, int event, |
| 1981 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1982 | { |
| 1983 | struct tcf_proto *tp; |
| 1984 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1985 | for (tp = tcf_get_next_proto(chain, NULL, rtnl_held); |
| 1986 | tp; tp = tcf_get_next_proto(chain, tp, rtnl_held)) |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1987 | tfilter_notify(net, oskb, n, tp, block, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1988 | q, parent, NULL, event, false, rtnl_held); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1989 | } |
| 1990 | |
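/* Drop the filter handle reference taken by tp->ops->get(), if the
 * classifier implements put().
 */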
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 1991 | static void tfilter_put(struct tcf_proto *tp, void *fh) |
| 1992 | { |
| 1993 | if (tp->ops->put && fh) |
| 1994 | tp->ops->put(tp, fh); |
| 1995 | } |
| 1996 | |
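/* RTM_NEWTFILTER handler: create or update a filter. Runs without the rtnl
 * lock when both the qdisc and the classifier support unlocked operation;
 * otherwise rtnl is taken below.
 */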
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1997 | static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
David Ahern | c21ef3e | 2017-04-16 09:48:24 -0700 | [diff] [blame] | 1998 | struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | { |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 2000 | struct net *net = sock_net(skb->sk); |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 2001 | struct nlattr *tca[TCA_MAX + 1]; |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2002 | char name[IFNAMSIZ]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2003 | struct tcmsg *t; |
| 2004 | u32 protocol; |
| 2005 | u32 prio; |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 2006 | bool prio_allocate; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2007 | u32 parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2008 | u32 chain_index; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2009 | struct Qdisc *q = NULL; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 2010 | struct tcf_chain_info chain_info; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2011 | struct tcf_chain *chain = NULL; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 2012 | struct tcf_block *block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2013 | struct tcf_proto *tp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2014 | unsigned long cl; |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2015 | void *fh; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2016 | int err; |
Daniel Borkmann | 628185c | 2016-12-21 18:04:11 +0100 | [diff] [blame] | 2017 | int tp_created; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2018 | bool rtnl_held = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2019 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2020 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
Eric W. Biederman | dfc47ef | 2012-11-16 03:03:00 +0000 | [diff] [blame] | 2021 | return -EPERM; |
Hong zhi guo | de179c8 | 2013-03-25 17:36:33 +0000 | [diff] [blame] | 2022 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | replay: |
Daniel Borkmann | 628185c | 2016-12-21 18:04:11 +0100 | [diff] [blame] | 2024 | tp_created = 0; |
| 2025 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2026 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2027 | rtm_tca_policy, extack); |
Hong zhi guo | de179c8 | 2013-03-25 17:36:33 +0000 | [diff] [blame] | 2028 | if (err < 0) |
| 2029 | return err; |
| 2030 | |
David S. Miller | 942b816 | 2012-06-26 21:48:50 -0700 | [diff] [blame] | 2031 | t = nlmsg_data(n); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2032 | protocol = TC_H_MIN(t->tcm_info); |
| 2033 | prio = TC_H_MAJ(t->tcm_info); |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 2034 | prio_allocate = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 | parent = t->tcm_parent; |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2036 | tp = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 | cl = 0; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2038 | block = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | |
| 2040 | if (prio == 0) { |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2041 | /* If no priority is provided by the user, |
| 2042 | * we allocate one. |
| 2043 | */ |
| 2044 | if (n->nlmsg_flags & NLM_F_CREATE) { |
| 2045 | prio = TC_H_MAKE(0x80000000U, 0U); |
| 2046 | prio_allocate = true; |
| 2047 | } else { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2048 | NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2049 | return -ENOENT; |
Daniel Borkmann | ea7f827 | 2016-06-10 23:10:22 +0200 | [diff] [blame] | 2050 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2051 | } |
| 2052 | |
| 2053 | /* Find head of filter chain. */ |
| 2054 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2055 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2056 | if (err) |
| 2057 | return err; |
| 2058 | |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2059 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2060 | NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); |
| 2061 | err = -EINVAL; |
| 2062 | goto errout; |
| 2063 | } |
| 2064 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2065 | /* Take rtnl mutex if rtnl_held was set to true on a previous iteration, |
 | 2066 | * if the block is shared (no qdisc found), if the qdisc is not unlocked, |
 | 2067 | * if the classifier type is not specified, or if the classifier is not unlocked. |
 | 2068 | */ |
| 2069 | if (rtnl_held || |
| 2070 | (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2071 | !tcf_proto_is_unlocked(name)) { |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2072 | rtnl_held = true; |
| 2073 | rtnl_lock(); |
| 2074 | } |
| 2075 | |
| 2076 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2077 | if (err) |
| 2078 | goto errout; |
| 2079 | |
| 2080 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2081 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2082 | if (IS_ERR(block)) { |
| 2083 | err = PTR_ERR(block); |
| 2084 | goto errout; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2085 | } |
Cong Wang | a7df487 | 2020-04-30 20:53:49 -0700 | [diff] [blame] | 2086 | block->classid = parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2087 | |
| 2088 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2089 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2090 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2091 | err = -EINVAL; |
| 2092 | goto errout; |
| 2093 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2094 | chain = tcf_chain_get(block, chain_index, true); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2095 | if (!chain) { |
Jiri Pirko | d5ed72a | 2018-08-27 20:58:43 +0200 | [diff] [blame] | 2096 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2097 | err = -ENOMEM; |
Daniel Borkmann | ea7f827 | 2016-06-10 23:10:22 +0200 | [diff] [blame] | 2098 | goto errout; |
| 2099 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2100 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2101 | mutex_lock(&chain->filter_chain_lock); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 2102 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2103 | prio, prio_allocate); |
| 2104 | if (IS_ERR(tp)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2105 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 2106 | err = PTR_ERR(tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2107 | goto errout_locked; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2108 | } |
| 2109 | |
| 2110 | if (tp == NULL) { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2111 | struct tcf_proto *tp_new = NULL; |
| 2112 | |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2113 | if (chain->flushing) { |
| 2114 | err = -EAGAIN; |
| 2115 | goto errout_locked; |
| 2116 | } |
| 2117 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2118 | /* Proto-tcf does not exist, create new one */ |
| 2119 | |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2120 | if (tca[TCA_KIND] == NULL || !protocol) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2121 | NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2122 | err = -EINVAL; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2123 | goto errout_locked; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2124 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2125 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2126 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2127 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2128 | err = -ENOENT; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2129 | goto errout_locked; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2130 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2131 | |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 2132 | if (prio_allocate) |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2133 | prio = tcf_auto_prio(tcf_chain_tp_prev(chain, |
| 2134 | &chain_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2135 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2136 | mutex_unlock(&chain->filter_chain_lock); |
Eric Dumazet | 36d79af | 2020-01-21 11:02:20 -0800 | [diff] [blame] | 2137 | tp_new = tcf_proto_create(name, protocol, prio, chain, |
| 2138 | rtnl_held, extack); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2139 | if (IS_ERR(tp_new)) { |
| 2140 | err = PTR_ERR(tp_new); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2141 | goto errout_tp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2142 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2143 | |
Minoru Usui | 12186be | 2009-06-02 02:17:34 -0700 | [diff] [blame] | 2144 | tp_created = 1; |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2145 | tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, |
| 2146 | rtnl_held); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2147 | if (IS_ERR(tp)) { |
| 2148 | err = PTR_ERR(tp); |
| 2149 | goto errout_tp; |
| 2150 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2151 | } else { |
| 2152 | mutex_unlock(&chain->filter_chain_lock); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2153 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2154 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2155 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2156 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2157 | err = -EINVAL; |
| 2158 | goto errout; |
| 2159 | } |
| 2160 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2161 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2162 | |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2163 | if (!fh) { |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2164 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2165 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2166 | err = -ENOENT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2167 | goto errout; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2168 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2169 | } else if (n->nlmsg_flags & NLM_F_EXCL) { |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2170 | tfilter_put(tp, fh); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2171 | NL_SET_ERR_MSG(extack, "Filter already exists"); |
| 2172 | err = -EEXIST; |
| 2173 | goto errout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2174 | } |
| 2175 | |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2176 | if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { |
| 2177 | NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); |
| 2178 | err = -EINVAL; |
| 2179 | goto errout; |
| 2180 | } |
| 2181 | |
Cong Wang | 2f7ef2f | 2014-04-25 13:54:06 -0700 | [diff] [blame] | 2182 | err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, |
Alexander Aring | 7306db3 | 2018-01-18 11:20:51 -0500 | [diff] [blame] | 2183 | n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2184 | rtnl_held, extack); |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2185 | if (err == 0) { |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2186 | tfilter_notify(net, skb, n, tp, block, q, parent, fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2187 | RTM_NEWTFILTER, false, rtnl_held); |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2188 | tfilter_put(tp, fh); |
Vlad Buslov | 503d81d | 2019-07-21 17:44:12 +0300 | [diff] [blame] | 2189 | /* q pointer is NULL for shared blocks */ |
| 2190 | if (q) |
| 2191 | q->flags &= ~TCQ_F_CAN_BYPASS; |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2192 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2193 | |
| 2194 | errout: |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2195 | if (err && tp_created) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2196 | tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2197 | errout_tp: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2198 | if (chain) { |
| 2199 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2200 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2201 | if (!tp_created) |
| 2202 | tcf_chain_put(chain); |
| 2203 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2204 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2205 | |
| 2206 | if (rtnl_held) |
| 2207 | rtnl_unlock(); |
| 2208 | |
| 2209 | if (err == -EAGAIN) { |
| 2210 | /* Take rtnl lock in case EAGAIN is caused by concurrent flush |
| 2211 | * of target chain. |
| 2212 | */ |
| 2213 | rtnl_held = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2214 | /* Replay the request. */ |
| 2215 | goto replay; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2216 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2217 | return err; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2218 | |
| 2219 | errout_locked: |
| 2220 | mutex_unlock(&chain->filter_chain_lock); |
| 2221 | goto errout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2222 | } |
| 2223 | |
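/* RTM_DELTFILTER handler: delete a single filter instance, remove a whole
 * tcf_proto (handle == 0), or flush the entire chain (prio == 0).
 */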
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2224 | static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| 2225 | struct netlink_ext_ack *extack) |
| 2226 | { |
| 2227 | struct net *net = sock_net(skb->sk); |
| 2228 | struct nlattr *tca[TCA_MAX + 1]; |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2229 | char name[IFNAMSIZ]; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2230 | struct tcmsg *t; |
| 2231 | u32 protocol; |
| 2232 | u32 prio; |
| 2233 | u32 parent; |
| 2234 | u32 chain_index; |
| 2235 | struct Qdisc *q = NULL; |
| 2236 | struct tcf_chain_info chain_info; |
| 2237 | struct tcf_chain *chain = NULL; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2238 | struct tcf_block *block = NULL; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2239 | struct tcf_proto *tp = NULL; |
| 2240 | unsigned long cl = 0; |
| 2241 | void *fh = NULL; |
| 2242 | int err; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2243 | bool rtnl_held = false; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2244 | |
| 2245 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
| 2246 | return -EPERM; |
| 2247 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2248 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2249 | rtm_tca_policy, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2250 | if (err < 0) |
| 2251 | return err; |
| 2252 | |
| 2253 | t = nlmsg_data(n); |
| 2254 | protocol = TC_H_MIN(t->tcm_info); |
| 2255 | prio = TC_H_MAJ(t->tcm_info); |
| 2256 | parent = t->tcm_parent; |
| 2257 | |
| 2258 | if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { |
| 2259 | NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); |
| 2260 | return -ENOENT; |
| 2261 | } |
| 2262 | |
| 2263 | /* Find head of filter chain. */ |
| 2264 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2265 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2266 | if (err) |
| 2267 | return err; |
| 2268 | |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2269 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2270 | NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); |
| 2271 | err = -EINVAL; |
| 2272 | goto errout; |
| 2273 | } |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2274 | /* Take rtnl mutex if flushing the whole chain, if the block is shared (no |
 | 2275 | * qdisc found), if the qdisc is not unlocked, if the classifier type is |
 | 2276 | * not specified, or if the classifier is not unlocked. |
 | 2277 | */ |
| 2278 | if (!prio || |
| 2279 | (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2280 | !tcf_proto_is_unlocked(name)) { |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2281 | rtnl_held = true; |
| 2282 | rtnl_lock(); |
| 2283 | } |
| 2284 | |
| 2285 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2286 | if (err) |
| 2287 | goto errout; |
| 2288 | |
| 2289 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2290 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2291 | if (IS_ERR(block)) { |
| 2292 | err = PTR_ERR(block); |
| 2293 | goto errout; |
| 2294 | } |
| 2295 | |
| 2296 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2297 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2298 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
| 2299 | err = -EINVAL; |
| 2300 | goto errout; |
| 2301 | } |
| 2302 | chain = tcf_chain_get(block, chain_index, false); |
| 2303 | if (!chain) { |
Jiri Pirko | 5ca8a25 | 2018-08-03 11:08:47 +0200 | [diff] [blame] | 2304 | /* User requested flush on non-existent chain. Nothing to do, |
| 2305 | * so just return success. |
| 2306 | */ |
| 2307 | if (prio == 0) { |
| 2308 | err = 0; |
| 2309 | goto errout; |
| 2310 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2311 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
Jiri Pirko | b7b4247 | 2018-08-27 20:58:44 +0200 | [diff] [blame] | 2312 | err = -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2313 | goto errout; |
| 2314 | } |
| 2315 | |
| 2316 | if (prio == 0) { |
| 2317 | tfilter_notify_chain(net, skb, block, q, parent, n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2318 | chain, RTM_DELTFILTER, rtnl_held); |
| 2319 | tcf_chain_flush(chain, rtnl_held); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2320 | err = 0; |
| 2321 | goto errout; |
| 2322 | } |
| 2323 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2324 | mutex_lock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2325 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2326 | prio, false); |
| 2327 | if (!tp || IS_ERR(tp)) { |
| 2328 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Vlad Buslov | 0e39903 | 2018-06-04 18:32:23 +0300 | [diff] [blame] | 2329 | err = tp ? PTR_ERR(tp) : -ENOENT; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2330 | goto errout_locked; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2331 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2332 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2333 | err = -EINVAL; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2334 | goto errout_locked; |
| 2335 | } else if (t->tcm_handle == 0) { |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 2336 | tcf_proto_signal_destroying(chain, tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2337 | tcf_chain_tp_remove(chain, &chain_info, tp); |
| 2338 | mutex_unlock(&chain->filter_chain_lock); |
| 2339 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2340 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2341 | tfilter_notify(net, skb, n, tp, block, q, parent, fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2342 | RTM_DELTFILTER, false, rtnl_held); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2343 | err = 0; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2344 | goto errout; |
| 2345 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2346 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2347 | |
| 2348 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2349 | |
| 2350 | if (!fh) { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2351 | NL_SET_ERR_MSG(extack, "Specified filter handle not found"); |
| 2352 | err = -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2353 | } else { |
| 2354 | bool last; |
| 2355 | |
| 2356 | err = tfilter_del_notify(net, skb, n, tp, block, |
| 2357 | q, parent, fh, false, &last, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2358 | rtnl_held, extack); |
| 2359 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2360 | if (err) |
| 2361 | goto errout; |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2362 | if (last) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2363 | tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2364 | } |
| 2365 | |
| 2366 | errout: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2367 | if (chain) { |
| 2368 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2369 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2370 | tcf_chain_put(chain); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2371 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2372 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2373 | |
| 2374 | if (rtnl_held) |
| 2375 | rtnl_unlock(); |
| 2376 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2377 | return err; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2378 | |
| 2379 | errout_locked: |
| 2380 | mutex_unlock(&chain->filter_chain_lock); |
| 2381 | goto errout; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2382 | } |
| 2383 | |
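/* RTM_GETTFILTER handler: look up a single filter and unicast it back to
 * the requesting socket as an RTM_NEWTFILTER message.
 */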
| 2384 | static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| 2385 | struct netlink_ext_ack *extack) |
| 2386 | { |
| 2387 | struct net *net = sock_net(skb->sk); |
| 2388 | struct nlattr *tca[TCA_MAX + 1]; |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2389 | char name[IFNAMSIZ]; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2390 | struct tcmsg *t; |
| 2391 | u32 protocol; |
| 2392 | u32 prio; |
| 2393 | u32 parent; |
| 2394 | u32 chain_index; |
| 2395 | struct Qdisc *q = NULL; |
| 2396 | struct tcf_chain_info chain_info; |
| 2397 | struct tcf_chain *chain = NULL; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2398 | struct tcf_block *block = NULL; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2399 | struct tcf_proto *tp = NULL; |
| 2400 | unsigned long cl = 0; |
| 2401 | void *fh = NULL; |
| 2402 | int err; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2403 | bool rtnl_held = false; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2404 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2405 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2406 | rtm_tca_policy, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2407 | if (err < 0) |
| 2408 | return err; |
| 2409 | |
| 2410 | t = nlmsg_data(n); |
| 2411 | protocol = TC_H_MIN(t->tcm_info); |
| 2412 | prio = TC_H_MAJ(t->tcm_info); |
| 2413 | parent = t->tcm_parent; |
| 2414 | |
| 2415 | if (prio == 0) { |
| 2416 | NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); |
| 2417 | return -ENOENT; |
| 2418 | } |
| 2419 | |
| 2420 | /* Find head of filter chain. */ |
| 2421 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2422 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2423 | if (err) |
| 2424 | return err; |
| 2425 | |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2426 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2427 | NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); |
| 2428 | err = -EINVAL; |
| 2429 | goto errout; |
| 2430 | } |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2431 | /* Take rtnl mutex if the block is shared (no qdisc found), if the qdisc |
 | 2432 | * is not unlocked, if the classifier type is not specified, or if the |
 | 2433 | * classifier is not unlocked. |
 | 2434 | */ |
| 2435 | if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2436 | !tcf_proto_is_unlocked(name)) { |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2437 | rtnl_held = true; |
| 2438 | rtnl_lock(); |
| 2439 | } |
| 2440 | |
| 2441 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2442 | if (err) |
| 2443 | goto errout; |
| 2444 | |
| 2445 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2446 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2447 | if (IS_ERR(block)) { |
| 2448 | err = PTR_ERR(block); |
| 2449 | goto errout; |
| 2450 | } |
| 2451 | |
| 2452 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2453 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2454 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
| 2455 | err = -EINVAL; |
| 2456 | goto errout; |
| 2457 | } |
| 2458 | chain = tcf_chain_get(block, chain_index, false); |
| 2459 | if (!chain) { |
| 2460 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
| 2461 | err = -EINVAL; |
| 2462 | goto errout; |
| 2463 | } |
| 2464 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2465 | mutex_lock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2466 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2467 | prio, false); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2468 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2469 | if (!tp || IS_ERR(tp)) { |
| 2470 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Vlad Buslov | 0e39903 | 2018-06-04 18:32:23 +0300 | [diff] [blame] | 2471 | err = tp ? PTR_ERR(tp) : -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2472 | goto errout; |
| 2473 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2474 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2475 | err = -EINVAL; |
| 2476 | goto errout; |
| 2477 | } |
| 2478 | |
| 2479 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2480 | |
| 2481 | if (!fh) { |
| 2482 | NL_SET_ERR_MSG(extack, "Specified filter handle not found"); |
| 2483 | err = -ENOENT; |
| 2484 | } else { |
| 2485 | err = tfilter_notify(net, skb, n, tp, block, q, parent, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2486 | fh, RTM_NEWTFILTER, true, rtnl_held); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2487 | if (err < 0) |
| 2488 | NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); |
| 2489 | } |
| 2490 | |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2491 | tfilter_put(tp, fh); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2492 | errout: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2493 | if (chain) { |
| 2494 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2495 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2496 | tcf_chain_put(chain); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2497 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2498 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2499 | |
| 2500 | if (rtnl_held) |
| 2501 | rtnl_unlock(); |
| 2502 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2503 | return err; |
| 2504 | } |
| 2505 | |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2506 | struct tcf_dump_args { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2507 | struct tcf_walker w; |
| 2508 | struct sk_buff *skb; |
| 2509 | struct netlink_callback *cb; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2510 | struct tcf_block *block; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2511 | struct Qdisc *q; |
| 2512 | u32 parent; |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2513 | bool terse_dump; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2514 | }; |
| 2515 | |
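/* Walker callback: fill one filter node into the dump skb. */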
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2516 | static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2517 | { |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2518 | struct tcf_dump_args *a = (void *)arg; |
WANG Cong | 832d1d5 | 2014-01-09 16:14:01 -0800 | [diff] [blame] | 2519 | struct net *net = sock_net(a->skb->sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2520 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2521 | return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2522 | n, NETLINK_CB(a->cb->skb).portid, |
Jamal Hadi Salim | 5a7a555 | 2016-09-18 08:45:33 -0400 | [diff] [blame] | 2523 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2524 | RTM_NEWTFILTER, a->terse_dump, true); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2525 | } |
| 2526 | |
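/* Dump all tcf_proto instances on one chain, honouring the prio/protocol
 * filter from the request and the resume state kept in cb->args[].
 */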
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2527 | static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, |
| 2528 | struct sk_buff *skb, struct netlink_callback *cb, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2529 | long index_start, long *p_index, bool terse) |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2530 | { |
| 2531 | struct net *net = sock_net(skb->sk); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2532 | struct tcf_block *block = chain->block; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2533 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2534 | struct tcf_proto *tp, *tp_prev; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2535 | struct tcf_dump_args arg; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2536 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2537 | for (tp = __tcf_get_next_proto(chain, NULL); |
| 2538 | tp; |
| 2539 | tp_prev = tp, |
| 2540 | tp = __tcf_get_next_proto(chain, tp), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2541 | tcf_proto_put(tp_prev, true, NULL), |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2542 | (*p_index)++) { |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2543 | if (*p_index < index_start) |
| 2544 | continue; |
| 2545 | if (TC_H_MAJ(tcm->tcm_info) && |
| 2546 | TC_H_MAJ(tcm->tcm_info) != tp->prio) |
| 2547 | continue; |
| 2548 | if (TC_H_MIN(tcm->tcm_info) && |
| 2549 | TC_H_MIN(tcm->tcm_info) != tp->protocol) |
| 2550 | continue; |
| 2551 | if (*p_index > index_start) |
| 2552 | memset(&cb->args[1], 0, |
| 2553 | sizeof(cb->args) - sizeof(cb->args[0])); |
| 2554 | if (cb->args[1] == 0) { |
YueHaibing | 5318918 | 2018-07-17 20:58:14 +0800 | [diff] [blame] | 2555 | if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2556 | NETLINK_CB(cb->skb).portid, |
| 2557 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2558 | RTM_NEWTFILTER, false, true) <= 0) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2559 | goto errout; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2560 | cb->args[1] = 1; |
| 2561 | } |
| 2562 | if (!tp->ops->walk) |
| 2563 | continue; |
| 2564 | arg.w.fn = tcf_node_dump; |
| 2565 | arg.skb = skb; |
| 2566 | arg.cb = cb; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2567 | arg.block = block; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2568 | arg.q = q; |
| 2569 | arg.parent = parent; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2570 | arg.w.stop = 0; |
| 2571 | arg.w.skip = cb->args[1] - 1; |
| 2572 | arg.w.count = 0; |
Vlad Buslov | 01683a1 | 2018-07-09 13:29:11 +0300 | [diff] [blame] | 2573 | arg.w.cookie = cb->args[2]; |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2574 | arg.terse_dump = terse; |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2575 | tp->ops->walk(tp, &arg.w, true); |
Vlad Buslov | 01683a1 | 2018-07-09 13:29:11 +0300 | [diff] [blame] | 2576 | cb->args[2] = arg.w.cookie; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2577 | cb->args[1] = arg.w.count + 1; |
| 2578 | if (arg.w.stop) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2579 | goto errout; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2580 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2581 | return true; |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2582 | |
| 2583 | errout: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2584 | tcf_proto_put(tp, true, NULL); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2585 | return false; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2586 | } |
| 2587 | |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2588 | static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { |
| 2589 | [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), |
| 2590 | }; |
| 2591 | |
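/* RTM_GETTFILTER dump handler: walk every chain on the block and dump its
 * filters; TCA_DUMP_FLAGS_TERSE requests a terse dump.
 */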
Eric Dumazet | bd27a87 | 2009-11-05 20:57:26 -0800 | [diff] [blame] | 2592 | /* called with RTNL */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2593 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
| 2594 | { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2595 | struct tcf_chain *chain, *chain_prev; |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 2596 | struct net *net = sock_net(skb->sk); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2597 | struct nlattr *tca[TCA_MAX + 1]; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2598 | struct Qdisc *q = NULL; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 2599 | struct tcf_block *block; |
David S. Miller | 942b816 | 2012-06-26 21:48:50 -0700 | [diff] [blame] | 2600 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2601 | bool terse_dump = false; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2602 | long index_start; |
| 2603 | long index; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2604 | u32 parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2605 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2606 | |
Hong zhi guo | 573ce26 | 2013-03-27 06:47:04 +0000 | [diff] [blame] | 2607 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2608 | return skb->len; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2609 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2610 | err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2611 | tcf_tfilter_dump_policy, cb->extack); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2612 | if (err) |
| 2613 | return err; |
| 2614 | |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2615 | if (tca[TCA_DUMP_FLAGS]) { |
| 2616 | struct nla_bitfield32 flags = |
| 2617 | nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); |
| 2618 | |
| 2619 | terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; |
| 2620 | } |
| 2621 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2622 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2623 | block = tcf_block_refcnt_get(net, tcm->tcm_block_index); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2624 | if (!block) |
WANG Cong | 143976c | 2017-08-24 16:51:29 -0700 | [diff] [blame] | 2625 | goto out; |
Jiri Pirko | d680b35 | 2018-01-18 16:14:49 +0100 | [diff] [blame] | 2626 | /* If we work with block index, q is NULL and parent value |
| 2627 | * will never be used in the following code. The check |
 | 2628 | * in tcf_fill_node prevents it. However, the compiler does not |
| 2629 | * see that far, so set parent to zero to silence the warning |
| 2630 | * about parent being uninitialized. |
| 2631 | */ |
| 2632 | parent = 0; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2633 | } else { |
| 2634 | const struct Qdisc_class_ops *cops; |
| 2635 | struct net_device *dev; |
| 2636 | unsigned long cl = 0; |
| 2637 | |
| 2638 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
| 2639 | if (!dev) |
| 2640 | return skb->len; |
| 2641 | |
| 2642 | parent = tcm->tcm_parent; |
Cong Wang | a7df487 | 2020-04-30 20:53:49 -0700 | [diff] [blame] | 2643 | if (!parent) |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2644 | q = dev->qdisc; |
Cong Wang | a7df487 | 2020-04-30 20:53:49 -0700 | [diff] [blame] | 2645 | else |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2646 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2647 | if (!q) |
| 2648 | goto out; |
| 2649 | cops = q->ops->cl_ops; |
| 2650 | if (!cops) |
| 2651 | goto out; |
| 2652 | if (!cops->tcf_block) |
| 2653 | goto out; |
| 2654 | if (TC_H_MIN(tcm->tcm_parent)) { |
| 2655 | cl = cops->find(q, tcm->tcm_parent); |
| 2656 | if (cl == 0) |
| 2657 | goto out; |
| 2658 | } |
| 2659 | block = cops->tcf_block(q, cl, NULL); |
| 2660 | if (!block) |
| 2661 | goto out; |
Cong Wang | a7df487 | 2020-04-30 20:53:49 -0700 | [diff] [blame] | 2662 | parent = block->classid; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2663 | if (tcf_block_shared(block)) |
| 2664 | q = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2665 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2666 | |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2667 | index_start = cb->args[0]; |
| 2668 | index = 0; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2669 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2670 | for (chain = __tcf_get_next_chain(block, NULL); |
| 2671 | chain; |
| 2672 | chain_prev = chain, |
| 2673 | chain = __tcf_get_next_chain(block, chain), |
| 2674 | tcf_chain_put(chain_prev)) { |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2675 | if (tca[TCA_CHAIN] && |
| 2676 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) |
| 2677 | continue; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2678 | if (!tcf_chain_dump(chain, q, parent, skb, cb, |
Vlad Buslov | f8ab180 | 2020-05-15 14:40:11 +0300 | [diff] [blame^] | 2679 | index_start, &index, terse_dump)) { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2680 | tcf_chain_put(chain); |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2681 | err = -EMSGSIZE; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2682 | break; |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2683 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2684 | } |
| 2685 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2686 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2687 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2688 | cb->args[0] = index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2689 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2690 | out: |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2691 | /* If we made no progress, the error (EMSGSIZE) is real */ |
| 2692 | if (skb->len == 0 && err) |
| 2693 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2694 | return skb->len; |
| 2695 | } |
| 2696 | |
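/* Fill a chain (RTM_*CHAIN) netlink message, including the template kind
 * and the template's own dump if a template is set on the chain.
 */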
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2697 | static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, |
| 2698 | void *tmplt_priv, u32 chain_index, |
| 2699 | struct net *net, struct sk_buff *skb, |
| 2700 | struct tcf_block *block, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2701 | u32 portid, u32 seq, u16 flags, int event) |
| 2702 | { |
| 2703 | unsigned char *b = skb_tail_pointer(skb); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2704 | const struct tcf_proto_ops *ops; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2705 | struct nlmsghdr *nlh; |
| 2706 | struct tcmsg *tcm; |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2707 | void *priv; |
| 2708 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2709 | ops = tmplt_ops; |
| 2710 | priv = tmplt_priv; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2711 | |
| 2712 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); |
| 2713 | if (!nlh) |
| 2714 | goto out_nlmsg_trim; |
| 2715 | tcm = nlmsg_data(nlh); |
| 2716 | tcm->tcm_family = AF_UNSPEC; |
| 2717 | tcm->tcm__pad1 = 0; |
| 2718 | tcm->tcm__pad2 = 0; |
| 2719 | tcm->tcm_handle = 0; |
| 2720 | if (block->q) { |
| 2721 | tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; |
| 2722 | tcm->tcm_parent = block->q->handle; |
| 2723 | } else { |
| 2724 | tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; |
| 2725 | tcm->tcm_block_index = block->index; |
| 2726 | } |
| 2727 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2728 | if (nla_put_u32(skb, TCA_CHAIN, chain_index)) |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2729 | goto nla_put_failure; |
| 2730 | |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2731 | if (ops) { |
| 2732 | if (nla_put_string(skb, TCA_KIND, ops->kind)) |
| 2733 | goto nla_put_failure; |
| 2734 | if (ops->tmplt_dump(skb, net, priv) < 0) |
| 2735 | goto nla_put_failure; |
| 2736 | } |
| 2737 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2738 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
| 2739 | return skb->len; |
| 2740 | |
| 2741 | out_nlmsg_trim: |
| 2742 | nla_put_failure: |
| 2743 | nlmsg_trim(skb, b); |
| 2744 | return -EMSGSIZE; |
| 2745 | } |
| 2746 | |
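/* Build a chain event message and send it, either as a unicast reply or to
 * the RTNLGRP_TC multicast group.
 */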
| 2747 | static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, |
| 2748 | u32 seq, u16 flags, int event, bool unicast) |
| 2749 | { |
| 2750 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 2751 | struct tcf_block *block = chain->block; |
| 2752 | struct net *net = block->net; |
| 2753 | struct sk_buff *skb; |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2754 | int err = 0; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2755 | |
| 2756 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 2757 | if (!skb) |
| 2758 | return -ENOBUFS; |
| 2759 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2760 | if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, |
| 2761 | chain->index, net, skb, block, portid, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2762 | seq, flags, event) <= 0) { |
| 2763 | kfree_skb(skb); |
| 2764 | return -EINVAL; |
| 2765 | } |
| 2766 | |
| 2767 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2768 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 2769 | else |
| 2770 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 2771 | flags & NLM_F_ECHO); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2772 | |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2773 | if (err > 0) |
| 2774 | err = 0; |
| 2775 | return err; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2776 | } |
| 2777 | |
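/* Like tc_chain_notify(), but builds the RTM_DELCHAIN message from the
 * template ops/priv and chain index passed in directly, so it does not
 * need a struct tcf_chain.
 */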
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2778 | static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, |
| 2779 | void *tmplt_priv, u32 chain_index, |
| 2780 | struct tcf_block *block, struct sk_buff *oskb, |
| 2781 | u32 seq, u16 flags, bool unicast) |
| 2782 | { |
| 2783 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 2784 | struct net *net = block->net; |
| 2785 | struct sk_buff *skb; |
| 2786 | |
| 2787 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 2788 | if (!skb) |
| 2789 | return -ENOBUFS; |
| 2790 | |
| 2791 | if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, |
| 2792 | block, portid, seq, flags, RTM_DELCHAIN) <= 0) { |
| 2793 | kfree_skb(skb); |
| 2794 | return -EINVAL; |
| 2795 | } |
| 2796 | |
| 2797 | if (unicast) |
| 2798 | return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 2799 | |
| 2800 | return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); |
| 2801 | } |
| 2802 | |
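/* Create a chain template: look up the classifier ops named in TCA_KIND
 * and let its tmplt_create() build the template private data.
 */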
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2803 | static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, |
| 2804 | struct nlattr **tca, |
| 2805 | struct netlink_ext_ack *extack) |
| 2806 | { |
| 2807 | const struct tcf_proto_ops *ops; |
Eric Dumazet | 2dd5616 | 2019-12-07 11:34:45 -0800 | [diff] [blame] | 2808 | char name[IFNAMSIZ]; |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2809 | void *tmplt_priv; |
| 2810 | |
| 2811 | /* If kind is not set, user did not specify template. */ |
| 2812 | if (!tca[TCA_KIND]) |
| 2813 | return 0; |
| 2814 | |
Eric Dumazet | 2dd5616 | 2019-12-07 11:34:45 -0800 | [diff] [blame] | 2815 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2816 | NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); |
| 2817 | return -EINVAL; |
| 2818 | } |
| 2819 | |
| 2820 | ops = tcf_proto_lookup_ops(name, true, extack); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2821 | if (IS_ERR(ops)) |
| 2822 | return PTR_ERR(ops); |
| 2823 | if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { |
| 2824 | NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); |
| 2825 | return -EOPNOTSUPP; |
| 2826 | } |
| 2827 | |
| 2828 | tmplt_priv = ops->tmplt_create(net, chain, tca, extack); |
| 2829 | if (IS_ERR(tmplt_priv)) { |
| 2830 | module_put(ops->owner); |
| 2831 | return PTR_ERR(tmplt_priv); |
| 2832 | } |
| 2833 | chain->tmplt_ops = ops; |
| 2834 | chain->tmplt_priv = tmplt_priv; |
| 2835 | return 0; |
| 2836 | } |
| 2837 | |
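/* Destroy a previously created chain template and release the module
 * reference taken when it was created.
 */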
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2838 | static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, |
| 2839 | void *tmplt_priv) |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2840 | { |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2841 | /* If template ops are not set, there is no template to destroy. */ |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2842 | if (!tmplt_ops) |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2843 | return; |
| 2844 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2845 | tmplt_ops->tmplt_destroy(tmplt_priv); |
| 2846 | module_put(tmplt_ops->owner); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2847 | } |
| 2848 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2849 | /* Add/delete/get a chain */ |
| 2850 | |
| 2851 | static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, |
| 2852 | struct netlink_ext_ack *extack) |
| 2853 | { |
| 2854 | struct net *net = sock_net(skb->sk); |
| 2855 | struct nlattr *tca[TCA_MAX + 1]; |
| 2856 | struct tcmsg *t; |
| 2857 | u32 parent; |
| 2858 | u32 chain_index; |
| 2859 | struct Qdisc *q = NULL; |
| 2860 | struct tcf_chain *chain = NULL; |
| 2861 | struct tcf_block *block; |
| 2862 | unsigned long cl; |
| 2863 | int err; |
| 2864 | |
| 2865 | if (n->nlmsg_type != RTM_GETCHAIN && |
| 2866 | !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
| 2867 | return -EPERM; |
| 2868 | |
| 2869 | replay: |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2870 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2871 | rtm_tca_policy, extack); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2872 | if (err < 0) |
| 2873 | return err; |
| 2874 | |
| 2875 | t = nlmsg_data(n); |
| 2876 | parent = t->tcm_parent; |
| 2877 | cl = 0; |
| 2878 | |
| 2879 | block = tcf_block_find(net, &q, &parent, &cl, |
| 2880 | t->tcm_ifindex, t->tcm_block_index, extack); |
| 2881 | if (IS_ERR(block)) |
| 2882 | return PTR_ERR(block); |
| 2883 | |
| 2884 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2885 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2886 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2887 | err = -EINVAL; |
| 2888 | goto errout_block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2889 | } |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2890 | |
| 2891 | mutex_lock(&block->lock); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2892 | chain = tcf_chain_lookup(block, chain_index); |
| 2893 | if (n->nlmsg_type == RTM_NEWCHAIN) { |
| 2894 | if (chain) { |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2895 | if (tcf_chain_held_by_acts_only(chain)) { |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2896 | /* The chain exists only because there is |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2897 | * some action referencing it. |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2898 | */ |
| 2899 | tcf_chain_hold(chain); |
| 2900 | } else { |
| 2901 | NL_SET_ERR_MSG(extack, "Filter chain already exists"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2902 | err = -EEXIST; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2903 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2904 | } |
| 2905 | } else { |
| 2906 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
| 2907 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2908 | err = -ENOENT; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2909 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2910 | } |
| 2911 | chain = tcf_chain_create(block, chain_index); |
| 2912 | if (!chain) { |
| 2913 | NL_SET_ERR_MSG(extack, "Failed to create filter chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2914 | err = -ENOMEM; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2915 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2916 | } |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2917 | } |
| 2918 | } else { |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2919 | if (!chain || tcf_chain_held_by_acts_only(chain)) { |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2920 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2921 | err = -EINVAL; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2922 | goto errout_block_locked; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2923 | } |
| 2924 | tcf_chain_hold(chain); |
| 2925 | } |
| 2926 | |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2927 | if (n->nlmsg_type == RTM_NEWCHAIN) { |
| 2928 | /* Modifying chain requires holding parent block lock. In case |
| 2929 | * the chain was successfully added, take a reference to the |
| 2930 | * chain. This ensures that an empty chain does not disappear at |
| 2931 | * the end of this function. |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2932 | */ |
| 2933 | tcf_chain_hold(chain); |
| 2934 | chain->explicitly_created = true; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2935 | } |
| 2936 | mutex_unlock(&block->lock); |
| 2937 | |
| 2938 | switch (n->nlmsg_type) { |
| 2939 | case RTM_NEWCHAIN: |
| 2940 | err = tc_chain_tmplt_add(chain, net, tca, extack); |
| 2941 | if (err) { |
| 2942 | tcf_chain_put_explicitly_created(chain); |
| 2943 | goto errout; |
| 2944 | } |
| 2945 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2946 | tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, |
| 2947 | RTM_NEWCHAIN, false); |
| 2948 | break; |
| 2949 | case RTM_DELCHAIN: |
Cong Wang | f5b9bac | 2018-09-11 14:22:23 -0700 | [diff] [blame] | 2950 | tfilter_notify_chain(net, skb, block, q, parent, n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2951 | chain, RTM_DELTFILTER, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2952 | /* Flush the chain first as the user requested chain removal. */ |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2953 | tcf_chain_flush(chain, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2954 | /* In case the chain was successfully deleted, put a reference |
| 2955 | * to the chain previously taken during addition. |
| 2956 | */ |
| 2957 | tcf_chain_put_explicitly_created(chain); |
| 2958 | break; |
| 2959 | case RTM_GETCHAIN: |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2960 | err = tc_chain_notify(chain, skb, n->nlmsg_seq, |
| 2961 | n->nlmsg_seq, n->nlmsg_type, true); |
| 2962 | if (err < 0) |
| 2963 | NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); |
| 2964 | break; |
| 2965 | default: |
| 2966 | err = -EOPNOTSUPP; |
| 2967 | NL_SET_ERR_MSG(extack, "Unsupported message type"); |
| 2968 | goto errout; |
| 2969 | } |
| 2970 | |
| 2971 | errout: |
| 2972 | tcf_chain_put(chain); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2973 | errout_block: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2974 | tcf_block_release(q, block, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2975 | if (err == -EAGAIN) |
| 2976 | /* Replay the request. */ |
| 2977 | goto replay; |
| 2978 | return err; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2979 | |
| 2980 | errout_block_locked: |
| 2981 | mutex_unlock(&block->lock); |
| 2982 | goto errout_block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2983 | } |
| 2984 | |
| 2985 | /* called with RTNL */ |
| 2986 | static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) |
| 2987 | { |
| 2988 | struct net *net = sock_net(skb->sk); |
| 2989 | struct nlattr *tca[TCA_MAX + 1]; |
| 2990 | struct Qdisc *q = NULL; |
| 2991 | struct tcf_block *block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2992 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2993 | struct tcf_chain *chain; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2994 | long index_start; |
| 2995 | long index; |
| 2996 | u32 parent; |
| 2997 | int err; |
| 2998 | |
| 2999 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
| 3000 | return skb->len; |
| 3001 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 3002 | err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, |
| 3003 | rtm_tca_policy, cb->extack); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3004 | if (err) |
| 3005 | return err; |
| 3006 | |
| 3007 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 3008 | block = tcf_block_refcnt_get(net, tcm->tcm_block_index); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3009 | if (!block) |
| 3010 | goto out; |
| 3011 | /* If we work with block index, q is NULL and parent value |
| 3012 | * will never be used in the following code. The check |
| 3013 | * in tcf_fill_node prevents it. However, the compiler does not
| 3014 | * see that far, so set parent to zero to silence the warning |
| 3015 | * about parent being uninitialized. |
| 3016 | */ |
| 3017 | parent = 0; |
| 3018 | } else { |
| 3019 | const struct Qdisc_class_ops *cops; |
| 3020 | struct net_device *dev; |
| 3021 | unsigned long cl = 0; |
| 3022 | |
| 3023 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
| 3024 | if (!dev) |
| 3025 | return skb->len; |
| 3026 | |
| 3027 | parent = tcm->tcm_parent; |
| 3028 | if (!parent) { |
| 3029 | q = dev->qdisc; |
| 3030 | parent = q->handle; |
| 3031 | } else { |
| 3032 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
| 3033 | } |
| 3034 | if (!q) |
| 3035 | goto out; |
| 3036 | cops = q->ops->cl_ops; |
| 3037 | if (!cops) |
| 3038 | goto out; |
| 3039 | if (!cops->tcf_block) |
| 3040 | goto out; |
| 3041 | if (TC_H_MIN(tcm->tcm_parent)) { |
| 3042 | cl = cops->find(q, tcm->tcm_parent); |
| 3043 | if (cl == 0) |
| 3044 | goto out; |
| 3045 | } |
| 3046 | block = cops->tcf_block(q, cl, NULL); |
| 3047 | if (!block) |
| 3048 | goto out; |
| 3049 | if (tcf_block_shared(block)) |
| 3050 | q = NULL; |
| 3051 | } |
| 3052 | |
| 3053 | index_start = cb->args[0]; |
| 3054 | index = 0; |
| 3055 | |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3056 | mutex_lock(&block->lock); |
| 3057 | list_for_each_entry(chain, &block->chain_list, list) { |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3058 | if (tca[TCA_CHAIN] &&
| 3059 | nla_get_u32(tca[TCA_CHAIN]) != chain->index)
| 3060 | continue; |
| 3061 | if (index < index_start) { |
| 3062 | index++; |
| 3063 | continue; |
| 3064 | } |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3065 | if (tcf_chain_held_by_acts_only(chain)) |
| 3066 | continue; |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 3067 | err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, |
| 3068 | chain->index, net, skb, block, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3069 | NETLINK_CB(cb->skb).portid, |
| 3070 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 3071 | RTM_NEWCHAIN); |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3072 | if (err <= 0) |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3073 | break; |
| 3074 | index++; |
| 3075 | } |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3076 | mutex_unlock(&block->lock); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3077 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 3078 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 3079 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3080 | cb->args[0] = index; |
| 3081 | |
| 3082 | out: |
| 3083 | /* If we made no progress, the error (EMSGSIZE) is real */
| 3084 | if (skb->len == 0 && err) |
| 3085 | return err; |
| 3086 | return skb->len; |
| 3087 | } |
| 3088 | |
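/* Release every action bound to @exts and free the action array.
 * No-op when CONFIG_NET_CLS_ACT is disabled.
 */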
WANG Cong | 18d0264 | 2014-09-25 10:26:37 -0700 | [diff] [blame] | 3089 | void tcf_exts_destroy(struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3090 | { |
| 3091 | #ifdef CONFIG_NET_CLS_ACT |
Eric Dumazet | 3d66b89 | 2019-09-18 12:57:04 -0700 | [diff] [blame] | 3092 | if (exts->actions) { |
| 3093 | tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); |
| 3094 | kfree(exts->actions); |
| 3095 | } |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3096 | exts->nr_actions = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3097 | #endif |
| 3098 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3099 | EXPORT_SYMBOL(tcf_exts_destroy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3100 | |
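/* Parse and bind the actions attached to a filter: either the legacy
 * single "police" attribute or the nested action list, whichever the
 * classifier registered in @exts. Without CONFIG_NET_CLS_ACT, any action
 * attribute is rejected with -EOPNOTSUPP.
 */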
Benjamin LaHaise | c1b5273 | 2013-01-14 05:15:39 +0000 | [diff] [blame] | 3101 | int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 3102 | struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 3103 | bool rtnl_held, struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3104 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3105 | #ifdef CONFIG_NET_CLS_ACT |
| 3106 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3107 | struct tc_action *act; |
Roman Mashak | d04e699 | 2018-03-08 16:59:17 -0500 | [diff] [blame] | 3108 | size_t attr_size = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3109 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3110 | if (exts->police && tb[exts->police]) { |
Jiri Pirko | 9fb9f25 | 2017-05-17 11:08:02 +0200 | [diff] [blame] | 3111 | act = tcf_action_init_1(net, tp, tb[exts->police], |
| 3112 | rate_tlv, "police", ovr, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 3113 | TCA_ACT_BIND, rtnl_held, |
| 3114 | extack); |
Patrick McHardy | ab27cfb | 2008-01-23 20:33:13 -0800 | [diff] [blame] | 3115 | if (IS_ERR(act)) |
| 3116 | return PTR_ERR(act); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3117 | |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3118 | act->type = exts->type = TCA_OLD_COMPAT; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3119 | exts->actions[0] = act; |
| 3120 | exts->nr_actions = 1; |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3121 | } else if (exts->action && tb[exts->action]) { |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3122 | int err; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3123 | |
Jiri Pirko | 9fb9f25 | 2017-05-17 11:08:02 +0200 | [diff] [blame] | 3124 | err = tcf_action_init(net, tp, tb[exts->action], |
| 3125 | rate_tlv, NULL, ovr, TCA_ACT_BIND, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 3126 | exts->actions, &attr_size, |
| 3127 | rtnl_held, extack); |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3128 | if (err < 0) |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3129 | return err; |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3130 | exts->nr_actions = err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3131 | } |
| 3132 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3133 | #else |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3134 | if ((exts->action && tb[exts->action]) || |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 3135 | (exts->police && tb[exts->police])) { |
| 3136 | NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3137 | return -EOPNOTSUPP; |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 3138 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3139 | #endif |
| 3140 | |
| 3141 | return 0; |
| 3142 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3143 | EXPORT_SYMBOL(tcf_exts_validate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3144 | |
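/* Replace the actions of @dst with those of @src, releasing the actions
 * that @dst previously held.
 */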
Jiri Pirko | 9b0d444 | 2017-08-04 14:29:15 +0200 | [diff] [blame] | 3145 | void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3146 | { |
| 3147 | #ifdef CONFIG_NET_CLS_ACT |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3148 | struct tcf_exts old = *dst; |
| 3149 | |
Jiri Pirko | 9b0d444 | 2017-08-04 14:29:15 +0200 | [diff] [blame] | 3150 | *dst = *src; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3151 | tcf_exts_destroy(&old); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3152 | #endif |
| 3153 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3154 | EXPORT_SYMBOL(tcf_exts_change); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3155 | |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3156 | #ifdef CONFIG_NET_CLS_ACT |
| 3157 | static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) |
| 3158 | { |
| 3159 | if (exts->nr_actions == 0) |
| 3160 | return NULL; |
| 3161 | else |
| 3162 | return exts->actions[0]; |
| 3163 | } |
| 3164 | #endif |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3165 | |
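/* Dump the bound actions into a netlink message, using either the nested
 * action list or the old-style single police attribute for backward
 * compatibility.
 */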
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3166 | int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3167 | { |
| 3168 | #ifdef CONFIG_NET_CLS_ACT |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 3169 | struct nlattr *nest; |
| 3170 | |
Jiri Pirko | 978dfd8 | 2017-08-04 14:29:03 +0200 | [diff] [blame] | 3171 | if (exts->action && tcf_exts_has_actions(exts)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3172 | /* |
| 3173 | * Again, for backward-compatible mode - we want
| 3174 | * to work with both old and new modes of entering
| 3175 | * tc data even if iproute2 is newer - jhs
| 3176 | */ |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3177 | if (exts->type != TCA_OLD_COMPAT) { |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 3178 | nest = nla_nest_start_noflag(skb, exts->action); |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3179 | if (nest == NULL) |
| 3180 | goto nla_put_failure; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3181 | |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3182 | if (tcf_action_dump(skb, exts->actions, 0, 0) < 0) |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 3183 | goto nla_put_failure; |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3184 | nla_nest_end(skb, nest); |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3185 | } else if (exts->police) { |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3186 | struct tc_action *act = tcf_exts_first_act(exts); |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 3187 | nest = nla_nest_start_noflag(skb, exts->police); |
Jamal Hadi Salim | 63acd68 | 2013-12-23 08:02:12 -0500 | [diff] [blame] | 3188 | if (nest == NULL || !act) |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3189 | goto nla_put_failure; |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3190 | if (tcf_action_dump_old(skb, act, 0, 0) < 0) |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 3191 | goto nla_put_failure; |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3192 | nla_nest_end(skb, nest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3193 | } |
| 3194 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3195 | return 0; |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 3196 | |
| 3197 | nla_put_failure: |
| 3198 | nla_nest_cancel(skb, nest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3199 | return -1; |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 3200 | #else |
| 3201 | return 0; |
| 3202 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3203 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3204 | EXPORT_SYMBOL(tcf_exts_dump); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3205 | |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3206 | |
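/* Dump the statistics of the first bound action (old-style compat path). */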
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3207 | int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3208 | { |
| 3209 | #ifdef CONFIG_NET_CLS_ACT |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3210 | struct tc_action *a = tcf_exts_first_act(exts); |
Ignacy Gawędzki | b057df2 | 2015-02-03 19:05:18 +0100 | [diff] [blame] | 3211 | if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3212 | return -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3213 | #endif |
| 3214 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3215 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3216 | EXPORT_SYMBOL(tcf_exts_dump_stats); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3217 | |
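/* Keep the per-filter TCA_CLS_FLAGS_IN_HW flag and the block-wide offloadcnt
 * in sync; only the transition into or out of "in hardware" touches the
 * block counter.
 */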
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3218 | static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) |
| 3219 | { |
| 3220 | if (*flags & TCA_CLS_FLAGS_IN_HW) |
| 3221 | return; |
| 3222 | *flags |= TCA_CLS_FLAGS_IN_HW; |
| 3223 | atomic_inc(&block->offloadcnt); |
| 3224 | } |
| 3225 | |
| 3226 | static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) |
| 3227 | { |
| 3228 | if (!(*flags & TCA_CLS_FLAGS_IN_HW)) |
| 3229 | return; |
| 3230 | *flags &= ~TCA_CLS_FLAGS_IN_HW; |
| 3231 | atomic_dec(&block->offloadcnt); |
| 3232 | } |
| 3233 | |
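/* Adjust the filter's in_hw_count by @diff under tp->lock and update the
 * IN_HW flag / block offload counter when the count rises from or drops
 * back to zero.
 */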
| 3234 | static void tc_cls_offload_cnt_update(struct tcf_block *block, |
| 3235 | struct tcf_proto *tp, u32 *cnt, |
| 3236 | u32 *flags, u32 diff, bool add) |
| 3237 | { |
| 3238 | lockdep_assert_held(&block->cb_lock); |
| 3239 | |
| 3240 | spin_lock(&tp->lock); |
| 3241 | if (add) { |
| 3242 | if (!*cnt) |
| 3243 | tcf_block_offload_inc(block, flags); |
| 3244 | *cnt += diff; |
| 3245 | } else { |
| 3246 | *cnt -= diff; |
| 3247 | if (!*cnt) |
| 3248 | tcf_block_offload_dec(block, flags); |
| 3249 | } |
| 3250 | spin_unlock(&tp->lock); |
| 3251 | } |
| 3252 | |
| 3253 | static void |
| 3254 | tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, |
| 3255 | u32 *cnt, u32 *flags) |
| 3256 | { |
| 3257 | lockdep_assert_held(&block->cb_lock); |
| 3258 | |
| 3259 | spin_lock(&tp->lock); |
| 3260 | tcf_block_offload_dec(block, flags); |
| 3261 | *cnt = 0; |
| 3262 | spin_unlock(&tp->lock); |
| 3263 | } |
| 3264 | |
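/* Invoke every callback registered on the block. Returns the number of
 * callbacks that succeeded, or the first error when @err_stop is set.
 */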
| 3265 | static int |
| 3266 | __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, |
| 3267 | void *type_data, bool err_stop) |
Jiri Pirko | 717503b | 2017-10-11 09:41:09 +0200 | [diff] [blame] | 3268 | { |
Pablo Neira Ayuso | 955bcb6 | 2019-07-09 22:55:46 +0200 | [diff] [blame] | 3269 | struct flow_block_cb *block_cb; |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3270 | int ok_count = 0; |
| 3271 | int err; |
| 3272 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3273 | list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { |
| 3274 | err = block_cb->cb(type, type_data, block_cb->cb_priv); |
| 3275 | if (err) { |
| 3276 | if (err_stop) |
| 3277 | return err; |
| 3278 | } else { |
| 3279 | ok_count++; |
| 3280 | } |
| 3281 | } |
| 3282 | return ok_count; |
| 3283 | } |
| 3284 | |
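/* Call the block callbacks for an offload request that does not change the
 * offload counters (e.g. stats retrieval). Takes rtnl when the block is
 * bound to a device that requires it.
 */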
| 3285 | int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, |
| 3286 | void *type_data, bool err_stop, bool rtnl_held) |
| 3287 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3288 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3289 | int ok_count; |
| 3290 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3291 | retry: |
| 3292 | if (take_rtnl) |
| 3293 | rtnl_lock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3294 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3295 | /* Need to obtain rtnl lock if block is bound to devs that require it. |
| 3296 | * In block bind code cb_lock is obtained while holding rtnl, so we must |
| 3297 | * obtain the locks in the same order here.
| 3298 | */ |
| 3299 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3300 | up_read(&block->cb_lock); |
| 3301 | take_rtnl = true; |
| 3302 | goto retry; |
| 3303 | } |
| 3304 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3305 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3306 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3307 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3308 | if (take_rtnl) |
| 3309 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3310 | return ok_count; |
| 3311 | } |
| 3312 | EXPORT_SYMBOL(tc_setup_cb_call); |
| 3313 | |
| 3314 | /* Non-destructive filter add. If a filter that wasn't already in hardware is
| 3315 | * successfully offloaded, increment the block offloads counter. On failure, the
| 3316 | * previously offloaded filter is considered to be intact and the offloads
| 3317 | * counter is not decremented.
| 3318 | */ |
| 3319 | |
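/* A sketch of a typical caller, modelled on a classifier's hardware-replace
 * path; the cls_flower/f names are illustrative and not taken from this file:
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 */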
| 3320 | int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, |
| 3321 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3322 | u32 *flags, unsigned int *in_hw_count, bool rtnl_held) |
| 3323 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3324 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3325 | int ok_count; |
| 3326 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3327 | retry: |
| 3328 | if (take_rtnl) |
| 3329 | rtnl_lock(); |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3330 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3331 | /* Need to obtain rtnl lock if block is bound to devs that require it. |
| 3332 | * In block bind code cb_lock is obtained while holding rtnl, so we must |
| 3333 | * obtain the locks in the same order here.
| 3334 | */ |
| 3335 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3336 | up_read(&block->cb_lock); |
| 3337 | take_rtnl = true; |
| 3338 | goto retry; |
| 3339 | } |
| 3340 | |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3341 | /* Make sure all netdevs sharing this block are offload-capable. */ |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3342 | if (block->nooffloaddevcnt && err_stop) { |
| 3343 | ok_count = -EOPNOTSUPP; |
| 3344 | goto err_unlock; |
| 3345 | } |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3346 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3347 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3348 | if (ok_count < 0) |
| 3349 | goto err_unlock; |
| 3350 | |
| 3351 | if (tp->ops->hw_add) |
| 3352 | tp->ops->hw_add(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3353 | if (ok_count > 0) |
| 3354 | tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, |
| 3355 | ok_count, true); |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3356 | err_unlock: |
| 3357 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3358 | if (take_rtnl) |
| 3359 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3360 | return ok_count < 0 ? ok_count : 0; |
Jiri Pirko | 717503b | 2017-10-11 09:41:09 +0200 | [diff] [blame] | 3361 | } |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3362 | EXPORT_SYMBOL(tc_setup_cb_add); |
| 3363 | |
| 3364 | /* Destructive filter replace. If a filter that wasn't already in hardware is
| 3365 | * successfully offloaded, increment the block offload counter. On failure, the
| 3366 | * previously offloaded filter is considered to be destroyed and the offload
| 3367 | * counter is decremented.
| 3368 | */ |
| 3369 | |
| 3370 | int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, |
| 3371 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3372 | u32 *old_flags, unsigned int *old_in_hw_count, |
| 3373 | u32 *new_flags, unsigned int *new_in_hw_count, |
| 3374 | bool rtnl_held) |
| 3375 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3376 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3377 | int ok_count; |
| 3378 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3379 | retry: |
| 3380 | if (take_rtnl) |
| 3381 | rtnl_lock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3382 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3383 | /* Need to obtain rtnl lock if block is bound to devs that require it. |
| 3384 | * In block bind code cb_lock is obtained while holding rtnl, so we must |
| 3385 | * obtain the locks in the same order here.
| 3386 | */ |
| 3387 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3388 | up_read(&block->cb_lock); |
| 3389 | take_rtnl = true; |
| 3390 | goto retry; |
| 3391 | } |
| 3392 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3393 | /* Make sure all netdevs sharing this block are offload-capable. */ |
| 3394 | if (block->nooffloaddevcnt && err_stop) { |
| 3395 | ok_count = -EOPNOTSUPP; |
| 3396 | goto err_unlock; |
| 3397 | } |
| 3398 | |
| 3399 | tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3400 | if (tp->ops->hw_del) |
| 3401 | tp->ops->hw_del(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3402 | |
| 3403 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3404 | if (ok_count < 0) |
| 3405 | goto err_unlock; |
| 3406 | |
| 3407 | if (tp->ops->hw_add) |
| 3408 | tp->ops->hw_add(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3409 | if (ok_count > 0) |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3410 | tc_cls_offload_cnt_update(block, tp, new_in_hw_count, |
| 3411 | new_flags, ok_count, true); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3412 | err_unlock: |
| 3413 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3414 | if (take_rtnl) |
| 3415 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3416 | return ok_count < 0 ? ok_count : 0; |
| 3417 | } |
| 3418 | EXPORT_SYMBOL(tc_setup_cb_replace); |
| 3419 | |
| 3420 | /* Destroy the filter and decrement the block offload counter, if the filter
| 3421 | * was previously offloaded.
| 3422 | */ |
| 3423 | |
| 3424 | int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, |
| 3425 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3426 | u32 *flags, unsigned int *in_hw_count, bool rtnl_held) |
| 3427 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3428 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3429 | int ok_count; |
| 3430 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3431 | retry: |
| 3432 | if (take_rtnl) |
| 3433 | rtnl_lock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3434 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3435 | /* Need to obtain rtnl lock if block is bound to devs that require it. |
| 3436 | * In block bind code cb_lock is obtained while holding rtnl, so we must |
| 3437 | * obtain the locks in the same order here.
| 3438 | */ |
| 3439 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3440 | up_read(&block->cb_lock); |
| 3441 | take_rtnl = true; |
| 3442 | goto retry; |
| 3443 | } |
| 3444 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3445 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
| 3446 | |
| 3447 | tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3448 | if (tp->ops->hw_del) |
| 3449 | tp->ops->hw_del(tp, type_data); |
| 3450 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3451 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3452 | if (take_rtnl) |
| 3453 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3454 | return ok_count < 0 ? ok_count : 0; |
| 3455 | } |
| 3456 | EXPORT_SYMBOL(tc_setup_cb_destroy); |
| 3457 | |
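/* Replay a single filter to one callback (e.g. when a callback is being
 * added to or removed from the block) and update the filter's offload
 * counters accordingly.
 */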
| 3458 | int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, |
| 3459 | bool add, flow_setup_cb_t *cb, |
| 3460 | enum tc_setup_type type, void *type_data, |
| 3461 | void *cb_priv, u32 *flags, unsigned int *in_hw_count) |
| 3462 | { |
| 3463 | int err = cb(type, type_data, cb_priv); |
| 3464 | |
| 3465 | if (err) { |
| 3466 | if (add && tc_skip_sw(*flags)) |
| 3467 | return err; |
| 3468 | } else { |
| 3469 | tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, |
| 3470 | add); |
| 3471 | } |
| 3472 | |
| 3473 | return 0; |
| 3474 | } |
| 3475 | EXPORT_SYMBOL(tc_setup_cb_reoffload); |
Jiri Pirko | b3f55bd | 2017-10-11 09:41:08 +0200 | [diff] [blame] | 3476 | |
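/* Copy the action's user-supplied cookie, if any, into the flow_action
 * entry; the copy is released by tcf_act_put_cookie().
 */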
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3477 | static int tcf_act_get_cookie(struct flow_action_entry *entry, |
| 3478 | const struct tc_action *act) |
| 3479 | { |
| 3480 | struct tc_cookie *cookie; |
| 3481 | int err = 0; |
| 3482 | |
| 3483 | rcu_read_lock(); |
| 3484 | cookie = rcu_dereference(act->act_cookie); |
| 3485 | if (cookie) { |
| 3486 | entry->cookie = flow_action_cookie_create(cookie->data, |
| 3487 | cookie->len, |
| 3488 | GFP_ATOMIC); |
| 3489 | if (!entry->cookie) |
| 3490 | err = -ENOMEM; |
| 3491 | } |
| 3492 | rcu_read_unlock(); |
| 3493 | return err; |
| 3494 | } |
| 3495 | |
| 3496 | static void tcf_act_put_cookie(struct flow_action_entry *entry) |
| 3497 | { |
| 3498 | flow_action_cookie_destroy(entry->cookie); |
| 3499 | } |
| 3500 | |
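/* Release the per-entry resources (cookies plus destructor-owned device,
 * tunnel, psample and gate references) taken by tc_setup_flow_action().
 */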
Vlad Buslov | 5a6ff4b | 2019-08-26 16:45:04 +0300 | [diff] [blame] | 3501 | void tc_cleanup_flow_action(struct flow_action *flow_action) |
| 3502 | { |
| 3503 | struct flow_action_entry *entry; |
| 3504 | int i; |
| 3505 | |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3506 | flow_action_for_each(i, entry, flow_action) { |
| 3507 | tcf_act_put_cookie(entry); |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3508 | if (entry->destructor) |
| 3509 | entry->destructor(entry->destructor_priv); |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3510 | } |
Vlad Buslov | 5a6ff4b | 2019-08-26 16:45:04 +0300 | [diff] [blame] | 3511 | } |
| 3512 | EXPORT_SYMBOL(tc_cleanup_flow_action); |
| 3513 | |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3514 | static void tcf_mirred_get_dev(struct flow_action_entry *entry, |
| 3515 | const struct tc_action *act) |
| 3516 | { |
Vlad Buslov | 470d506 | 2019-09-13 18:28:41 +0300 | [diff] [blame] | 3517 | #ifdef CONFIG_NET_CLS_ACT |
| 3518 | entry->dev = act->ops->get_dev(act, &entry->destructor); |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3519 | if (!entry->dev) |
| 3520 | return; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3521 | entry->destructor_priv = entry->dev; |
Vlad Buslov | 470d506 | 2019-09-13 18:28:41 +0300 | [diff] [blame] | 3522 | #endif |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3523 | } |
| 3524 | |
| 3525 | static void tcf_tunnel_encap_put_tunnel(void *priv) |
| 3526 | { |
| 3527 | struct ip_tunnel_info *tunnel = priv; |
| 3528 | |
| 3529 | kfree(tunnel); |
| 3530 | } |
| 3531 | |
| 3532 | static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, |
| 3533 | const struct tc_action *act) |
| 3534 | { |
| 3535 | entry->tunnel = tcf_tunnel_info_copy(act); |
| 3536 | if (!entry->tunnel) |
| 3537 | return -ENOMEM; |
| 3538 | entry->destructor = tcf_tunnel_encap_put_tunnel; |
| 3539 | entry->destructor_priv = entry->tunnel; |
| 3540 | return 0; |
| 3541 | } |
| 3542 | |
Vlad Buslov | 4a5da47 | 2019-09-13 18:28:40 +0300 | [diff] [blame] | 3543 | static void tcf_sample_get_group(struct flow_action_entry *entry, |
| 3544 | const struct tc_action *act) |
| 3545 | { |
| 3546 | #ifdef CONFIG_NET_CLS_ACT |
| 3547 | entry->sample.psample_group = |
| 3548 | act->ops->get_psample_group(act, &entry->destructor); |
| 3549 | entry->destructor_priv = entry->sample.psample_group; |
| 3550 | #endif |
| 3551 | } |
| 3552 | |
Po Liu | d29bdd6 | 2020-05-01 08:53:16 +0800 | [diff] [blame] | 3553 | static void tcf_gate_entry_destructor(void *priv) |
| 3554 | { |
| 3555 | struct action_gate_entry *oe = priv; |
| 3556 | |
| 3557 | kfree(oe); |
| 3558 | } |
| 3559 | |
| 3560 | static int tcf_gate_get_entries(struct flow_action_entry *entry, |
| 3561 | const struct tc_action *act) |
| 3562 | { |
| 3563 | entry->gate.entries = tcf_gate_get_list(act); |
| 3564 | |
| 3565 | if (!entry->gate.entries) |
| 3566 | return -EINVAL; |
| 3567 | |
| 3568 | entry->destructor = tcf_gate_entry_destructor; |
| 3569 | entry->destructor_priv = entry->gate.entries; |
| 3570 | |
| 3571 | return 0; |
| 3572 | } |
| 3573 | |
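/* Map the action's TCA_ACT_HW_STATS_* request onto the flow_action
 * representation: out-of-range values fall back to "don't care", zero means
 * disabled, anything else maps one-to-one (the bit values are identical).
 */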
Pablo Neira Ayuso | 16f8036 | 2020-05-06 20:34:50 +0200 | [diff] [blame] | 3574 | static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) |
| 3575 | { |
| 3576 | if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) |
| 3577 | return FLOW_ACTION_HW_STATS_DONT_CARE; |
| 3578 | else if (!hw_stats) |
| 3579 | return FLOW_ACTION_HW_STATS_DISABLED; |
| 3580 | |
| 3581 | return hw_stats; |
| 3582 | } |
| 3583 | |
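/* Translate the actions bound to a classifier's tcf_exts into the
 * flow_action array handed to drivers. Each action is converted under its
 * tcfa_lock; a pedit action expands into one entry per key.
 */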
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3584 | int tc_setup_flow_action(struct flow_action *flow_action, |
Vlad Buslov | b15e7a6 | 2020-02-17 12:12:12 +0200 | [diff] [blame] | 3585 | const struct tcf_exts *exts) |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3586 | { |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3587 | struct tc_action *act; |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3588 | int i, j, k, err = 0; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3589 | |
Jakub Kicinski | 0dfb2d8 | 2020-03-19 16:26:23 -0700 | [diff] [blame] | 3590 | BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); |
| 3591 | BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); |
| 3592 | BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); |
Jiri Pirko | 44f8658 | 2020-03-07 12:40:20 +0100 | [diff] [blame] | 3593 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3594 | if (!exts) |
| 3595 | return 0; |
| 3596 | |
| 3597 | j = 0; |
| 3598 | tcf_exts_for_each_action(i, act, exts) { |
| 3599 | struct flow_action_entry *entry; |
| 3600 | |
| 3601 | entry = &flow_action->entries[j]; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3602 | spin_lock_bh(&act->tcfa_lock); |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3603 | err = tcf_act_get_cookie(entry, act); |
| 3604 | if (err) |
| 3605 | goto err_out_locked; |
Jiri Pirko | 44f8658 | 2020-03-07 12:40:20 +0100 | [diff] [blame] | 3606 | |
Pablo Neira Ayuso | 16f8036 | 2020-05-06 20:34:50 +0200 | [diff] [blame] | 3607 | entry->hw_stats = tc_act_hw_stats(act->hw_stats); |
Jiri Pirko | 44f8658 | 2020-03-07 12:40:20 +0100 | [diff] [blame] | 3608 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3609 | if (is_tcf_gact_ok(act)) { |
| 3610 | entry->id = FLOW_ACTION_ACCEPT; |
| 3611 | } else if (is_tcf_gact_shot(act)) { |
| 3612 | entry->id = FLOW_ACTION_DROP; |
| 3613 | } else if (is_tcf_gact_trap(act)) { |
| 3614 | entry->id = FLOW_ACTION_TRAP; |
| 3615 | } else if (is_tcf_gact_goto_chain(act)) { |
| 3616 | entry->id = FLOW_ACTION_GOTO; |
| 3617 | entry->chain_index = tcf_gact_goto_chain_index(act); |
| 3618 | } else if (is_tcf_mirred_egress_redirect(act)) { |
| 3619 | entry->id = FLOW_ACTION_REDIRECT; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3620 | tcf_mirred_get_dev(entry, act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3621 | } else if (is_tcf_mirred_egress_mirror(act)) { |
| 3622 | entry->id = FLOW_ACTION_MIRRED; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3623 | tcf_mirred_get_dev(entry, act); |
John Hurley | 48e584a | 2019-08-04 16:09:06 +0100 | [diff] [blame] | 3624 | } else if (is_tcf_mirred_ingress_redirect(act)) { |
| 3625 | entry->id = FLOW_ACTION_REDIRECT_INGRESS; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3626 | tcf_mirred_get_dev(entry, act); |
John Hurley | 48e584a | 2019-08-04 16:09:06 +0100 | [diff] [blame] | 3627 | } else if (is_tcf_mirred_ingress_mirror(act)) { |
| 3628 | entry->id = FLOW_ACTION_MIRRED_INGRESS; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3629 | tcf_mirred_get_dev(entry, act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3630 | } else if (is_tcf_vlan(act)) { |
| 3631 | switch (tcf_vlan_action(act)) { |
| 3632 | case TCA_VLAN_ACT_PUSH: |
| 3633 | entry->id = FLOW_ACTION_VLAN_PUSH; |
| 3634 | entry->vlan.vid = tcf_vlan_push_vid(act); |
| 3635 | entry->vlan.proto = tcf_vlan_push_proto(act); |
| 3636 | entry->vlan.prio = tcf_vlan_push_prio(act); |
| 3637 | break; |
| 3638 | case TCA_VLAN_ACT_POP: |
| 3639 | entry->id = FLOW_ACTION_VLAN_POP; |
| 3640 | break; |
| 3641 | case TCA_VLAN_ACT_MODIFY: |
| 3642 | entry->id = FLOW_ACTION_VLAN_MANGLE; |
| 3643 | entry->vlan.vid = tcf_vlan_push_vid(act); |
| 3644 | entry->vlan.proto = tcf_vlan_push_proto(act); |
| 3645 | entry->vlan.prio = tcf_vlan_push_prio(act); |
| 3646 | break; |
| 3647 | default: |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3648 | err = -EOPNOTSUPP; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3649 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3650 | } |
| 3651 | } else if (is_tcf_tunnel_set(act)) { |
| 3652 | entry->id = FLOW_ACTION_TUNNEL_ENCAP; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3653 | err = tcf_tunnel_encap_get_tunnel(entry, act); |
| 3654 | if (err) |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3655 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3656 | } else if (is_tcf_tunnel_release(act)) { |
| 3657 | entry->id = FLOW_ACTION_TUNNEL_DECAP; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3658 | } else if (is_tcf_pedit(act)) { |
| 3659 | for (k = 0; k < tcf_pedit_nkeys(act); k++) { |
| 3660 | switch (tcf_pedit_cmd(act, k)) { |
| 3661 | case TCA_PEDIT_KEY_EX_CMD_SET: |
| 3662 | entry->id = FLOW_ACTION_MANGLE; |
| 3663 | break; |
| 3664 | case TCA_PEDIT_KEY_EX_CMD_ADD: |
| 3665 | entry->id = FLOW_ACTION_ADD; |
| 3666 | break; |
| 3667 | default: |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3668 | err = -EOPNOTSUPP; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3669 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3670 | } |
| 3671 | entry->mangle.htype = tcf_pedit_htype(act, k); |
| 3672 | entry->mangle.mask = tcf_pedit_mask(act, k); |
| 3673 | entry->mangle.val = tcf_pedit_val(act, k); |
| 3674 | entry->mangle.offset = tcf_pedit_offset(act, k); |
Pablo Neira Ayuso | 16f8036 | 2020-05-06 20:34:50 +0200 | [diff] [blame] | 3675 | entry->hw_stats = tc_act_hw_stats(act->hw_stats); |
Petr Machata | 2c4b58d | 2020-03-18 19:42:29 +0200 | [diff] [blame] | 3676 | entry = &flow_action->entries[++j]; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3677 | } |
| 3678 | } else if (is_tcf_csum(act)) { |
| 3679 | entry->id = FLOW_ACTION_CSUM; |
| 3680 | entry->csum_flags = tcf_csum_update_flags(act); |
| 3681 | } else if (is_tcf_skbedit_mark(act)) { |
| 3682 | entry->id = FLOW_ACTION_MARK; |
| 3683 | entry->mark = tcf_skbedit_mark(act); |
Pieter Jansen van Vuuren | a7a7be6 | 2019-05-04 04:46:16 -0700 | [diff] [blame] | 3684 | } else if (is_tcf_sample(act)) { |
| 3685 | entry->id = FLOW_ACTION_SAMPLE; |
Pieter Jansen van Vuuren | a7a7be6 | 2019-05-04 04:46:16 -0700 | [diff] [blame] | 3686 | entry->sample.trunc_size = tcf_sample_trunc_size(act); |
| 3687 | entry->sample.truncate = tcf_sample_truncate(act); |
| 3688 | entry->sample.rate = tcf_sample_rate(act); |
Vlad Buslov | 4a5da47 | 2019-09-13 18:28:40 +0300 | [diff] [blame] | 3689 | tcf_sample_get_group(entry, act); |
Pieter Jansen van Vuuren | 8c8cfc6 | 2019-05-04 04:46:22 -0700 | [diff] [blame] | 3690 | } else if (is_tcf_police(act)) { |
| 3691 | entry->id = FLOW_ACTION_POLICE; |
| 3692 | entry->police.burst = tcf_police_tcfp_burst(act); |
| 3693 | entry->police.rate_bytes_ps = |
| 3694 | tcf_police_rate_bytes_ps(act); |
Paul Blakey | b57dc7c | 2019-07-09 10:30:48 +0300 | [diff] [blame] | 3695 | } else if (is_tcf_ct(act)) { |
| 3696 | entry->id = FLOW_ACTION_CT; |
| 3697 | entry->ct.action = tcf_ct_action(act); |
| 3698 | entry->ct.zone = tcf_ct_zone(act); |
Paul Blakey | edd5861 | 2020-03-12 12:23:09 +0200 | [diff] [blame] | 3699 | entry->ct.flow_table = tcf_ct_ft(act); |
John Hurley | 6749d590 | 2019-07-23 15:33:59 +0100 | [diff] [blame] | 3700 | } else if (is_tcf_mpls(act)) { |
| 3701 | switch (tcf_mpls_action(act)) { |
| 3702 | case TCA_MPLS_ACT_PUSH: |
| 3703 | entry->id = FLOW_ACTION_MPLS_PUSH; |
| 3704 | entry->mpls_push.proto = tcf_mpls_proto(act); |
| 3705 | entry->mpls_push.label = tcf_mpls_label(act); |
| 3706 | entry->mpls_push.tc = tcf_mpls_tc(act); |
| 3707 | entry->mpls_push.bos = tcf_mpls_bos(act); |
| 3708 | entry->mpls_push.ttl = tcf_mpls_ttl(act); |
| 3709 | break; |
| 3710 | case TCA_MPLS_ACT_POP: |
| 3711 | entry->id = FLOW_ACTION_MPLS_POP; |
| 3712 | entry->mpls_pop.proto = tcf_mpls_proto(act); |
| 3713 | break; |
| 3714 | case TCA_MPLS_ACT_MODIFY: |
| 3715 | entry->id = FLOW_ACTION_MPLS_MANGLE; |
| 3716 | entry->mpls_mangle.label = tcf_mpls_label(act); |
| 3717 | entry->mpls_mangle.tc = tcf_mpls_tc(act); |
| 3718 | entry->mpls_mangle.bos = tcf_mpls_bos(act); |
| 3719 | entry->mpls_mangle.ttl = tcf_mpls_ttl(act); |
| 3720 | break; |
| 3721 | default:
err = -EOPNOTSUPP;
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3722 | goto err_out_locked;
John Hurley | 6749d590 | 2019-07-23 15:33:59 +0100 | [diff] [blame] | 3723 | } |
John Hurley | fb1b775 | 2019-08-04 16:09:04 +0100 | [diff] [blame] | 3724 | } else if (is_tcf_skbedit_ptype(act)) { |
| 3725 | entry->id = FLOW_ACTION_PTYPE; |
| 3726 | entry->ptype = tcf_skbedit_ptype(act); |
Petr Machata | 2ce1241 | 2020-03-19 15:47:21 +0200 | [diff] [blame] | 3727 | } else if (is_tcf_skbedit_priority(act)) { |
| 3728 | entry->id = FLOW_ACTION_PRIORITY; |
| 3729 | entry->priority = tcf_skbedit_priority(act); |
Po Liu | d29bdd6 | 2020-05-01 08:53:16 +0800 | [diff] [blame] | 3730 | } else if (is_tcf_gate(act)) { |
| 3731 | entry->id = FLOW_ACTION_GATE; |
| 3732 | entry->gate.index = tcf_gate_index(act); |
| 3733 | entry->gate.prio = tcf_gate_prio(act); |
| 3734 | entry->gate.basetime = tcf_gate_basetime(act); |
| 3735 | entry->gate.cycletime = tcf_gate_cycletime(act); |
| 3736 | entry->gate.cycletimeext = tcf_gate_cycletimeext(act); |
| 3737 | entry->gate.num_entries = tcf_gate_num_entries(act); |
| 3738 | err = tcf_gate_get_entries(entry, act); |
| 3739 | if (err) |
| 3740 | goto err_out; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3741 | } else { |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3742 | err = -EOPNOTSUPP; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3743 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3744 | } |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3745 | spin_unlock_bh(&act->tcfa_lock); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3746 | |
| 3747 | if (!is_tcf_pedit(act)) |
| 3748 | j++; |
| 3749 | } |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3750 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3751 | err_out: |
Vlad Buslov | 5a6ff4b | 2019-08-26 16:45:04 +0300 | [diff] [blame] | 3752 | if (err) |
| 3753 | tc_cleanup_flow_action(flow_action); |
| 3754 | |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3755 | return err; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3756 | err_out_locked: |
| 3757 | spin_unlock_bh(&act->tcfa_lock); |
| 3758 | goto err_out; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3759 | } |
| 3760 | EXPORT_SYMBOL(tc_setup_flow_action); |
| 3761 | |
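/* Number of flow_action entries needed for the actions in @exts; pedit
 * counts one entry per key.
 */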
Pablo Neira Ayuso | e3ab786 | 2019-02-02 12:50:45 +0100 | [diff] [blame] | 3762 | unsigned int tcf_exts_num_actions(struct tcf_exts *exts) |
| 3763 | { |
| 3764 | unsigned int num_acts = 0; |
| 3765 | struct tc_action *act; |
| 3766 | int i; |
| 3767 | |
| 3768 | tcf_exts_for_each_action(i, act, exts) { |
| 3769 | if (is_tcf_pedit(act)) |
| 3770 | num_acts += tcf_pedit_nkeys(act); |
| 3771 | else |
| 3772 | num_acts++; |
| 3773 | } |
| 3774 | return num_acts; |
| 3775 | } |
| 3776 | EXPORT_SYMBOL(tcf_exts_num_actions); |
| 3777 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3778 | static __net_init int tcf_net_init(struct net *net) |
| 3779 | { |
| 3780 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 3781 | |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 3782 | spin_lock_init(&tn->idr_lock); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3783 | idr_init(&tn->idr); |
| 3784 | return 0; |
| 3785 | } |
| 3786 | |
| 3787 | static void __net_exit tcf_net_exit(struct net *net) |
| 3788 | { |
| 3789 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 3790 | |
| 3791 | idr_destroy(&tn->idr); |
| 3792 | } |
| 3793 | |
| 3794 | static struct pernet_operations tcf_net_ops = { |
| 3795 | .init = tcf_net_init, |
| 3796 | .exit = tcf_net_exit, |
| 3797 | .id = &tcf_net_id, |
| 3798 | .size = sizeof(struct tcf_net), |
| 3799 | }; |
| 3800 | |
John Hurley | 25a443f | 2019-12-05 17:03:35 +0000 | [diff] [blame] | 3801 | static struct flow_indr_block_entry block_entry = { |
| 3802 | .cb = tc_indr_block_get_and_cmd, |
| 3803 | .list = LIST_HEAD_INIT(block_entry.list), |
wenxu | 1150ab0 | 2019-08-07 09:13:53 +0800 | [diff] [blame] | 3804 | }; |
| 3805 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3806 | static int __init tc_filter_init(void) |
| 3807 | { |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3808 | int err; |
| 3809 | |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 3810 | tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); |
| 3811 | if (!tc_filter_wq) |
| 3812 | return -ENOMEM; |
| 3813 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3814 | err = register_pernet_subsys(&tcf_net_ops); |
| 3815 | if (err) |
| 3816 | goto err_register_pernet_subsys; |
| 3817 | |
John Hurley | 25a443f | 2019-12-05 17:03:35 +0000 | [diff] [blame] | 3818 | flow_indr_add_block_cb(&block_entry); |
wenxu | 1150ab0 | 2019-08-07 09:13:53 +0800 | [diff] [blame] | 3819 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 3820 | rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, |
| 3821 | RTNL_FLAG_DOIT_UNLOCKED); |
| 3822 | rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, |
| 3823 | RTNL_FLAG_DOIT_UNLOCKED); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 3824 | rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 3825 | tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3826 | rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); |
| 3827 | rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); |
| 3828 | rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, |
| 3829 | tc_dump_chain, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3830 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3831 | return 0; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3832 | |
| 3833 | err_register_pernet_subsys: |
| 3834 | destroy_workqueue(tc_filter_wq); |
| 3835 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3836 | } |
| 3837 | |
| 3838 | subsys_initcall(tc_filter_init); |