// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
        return jhash_3words(tp->chain->index, tp->prio,
                            (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
                                        struct tcf_proto *tp)
{
        struct tcf_block *block = chain->block;

        mutex_lock(&block->proto_destroy_lock);
        hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
                     destroy_obj_hashfn(tp));
        mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
                          const struct tcf_proto *tp2)
{
        return tp1->chain->index == tp2->chain->index &&
               tp1->prio == tp2->prio &&
               tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
                                        struct tcf_proto *tp)
{
        u32 hash = destroy_obj_hashfn(tp);
        struct tcf_proto *iter;
        bool found = false;

        rcu_read_lock();
        hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
                                   destroy_ht_node, hash) {
                if (tcf_proto_cmp(tp, iter)) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
        struct tcf_block *block = chain->block;

        mutex_lock(&block->proto_destroy_lock);
        if (hash_hashed(&tp->destroy_ht_node))
                hash_del_rcu(&tp->destroy_ht_node);
        mutex_unlock(&block->proto_destroy_lock);
}
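
/* The helpers above implement a small destroy-tracking protocol: a proto
 * being torn down is first published in proto_destroy_ht by
 * tcf_proto_signal_destroying(); concurrent creators can then probe the
 * table with tcf_proto_exists_destroying() to avoid reusing the same
 * (chain index, prio, protocol) triple while the old instance is still
 * dying; tcf_proto_signal_destroyed() removes the entry once destruction
 * completes.
 */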

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module, we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}
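
/* A minimal caller sketch (illustrative; "replay" is a hypothetical
 * label): the -EAGAIN returned from the CONFIG_MODULES path above is
 * meant to make the caller restart the whole request after the module
 * load, because RTNL was dropped in between:
 *
 *	ops = tcf_proto_lookup_ops(kind, true, extack);
 *	if (IS_ERR(ops)) {
 *		if (PTR_ERR(ops) == -EAGAIN)
 *			goto replay;
 *		return PTR_ERR(ops);
 *	}
 */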

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
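
/* Usage sketch (illustrative only; "cls_foo" and its callbacks are
 * hypothetical, and a real tcf_proto_ops fills in more callbacks):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.owner		= THIS_MODULE,
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * The matching module exit calls unregister_tcf_proto_ops() below.
 */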

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
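
/* Deferral sketch (illustrative; "struct foo_filter" and
 * foo_destroy_work() are hypothetical): a classifier that embeds a
 * struct rcu_work in its filter can free it after an RCU grace period,
 * in process context on tc_filter_wq:
 *
 *	struct foo_filter {
 *		struct rcu_work rwork;
 *		...
 *	};
 *
 *	tcf_queue_work(&f->rwork, foo_destroy_work);
 */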

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
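
/* For example (illustrative values): with an existing head filter at
 * tp->prio 0xC0000000 (user prio 49152), first becomes 0xBFFFFFFF and
 * TC_H_MAJ(first) is 0xBFFF0000, i.e. auto-selected user prio 49151.
 * With no filters installed the result is 0xC0000000 itself.
 */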

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
        if (kind)
                return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
        memset(name, 0, IFNAMSIZ);
        return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        if (strlen(kind) == 0)
                return false;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take rtnl lock. Proto lookup/create
         * functions will perform lookup again and properly handle errors.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              bool sig_destroy, struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        if (sig_destroy)
                tcf_proto_signal_destroyed(tp->chain, tp);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, true, extack);
}
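
/* Lifetime sketch: tcf_proto_create() returns a proto with refcnt 1;
 * concurrent users take additional references with tcf_proto_get()
 * (e.g. while a filter request runs with RTNL dropped) and release
 * them with tcf_proto_put(). The final put runs the destroy path
 * above, which also drops the proto's reference on its chain.
 */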

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
        if (tp->ops->delete_empty)
                return tp->ops->delete_empty(tp);

        tp->deleting = true;
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)	\
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail_rcu(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del_rcu(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        mutex_destroy(&block->proto_destroy_lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* In case all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
                                              u32 chain_index)
{
        struct tcf_chain *chain;

        list_for_each_entry_rcu(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send notification only in case we got the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it and the user ought
         * not to know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
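
/* Sketch (illustrative): an action that jumps to a chain, e.g. gact's
 * goto_chain, pins the target chain so it cannot disappear while the
 * action exists:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);
 *
 * Such action-only references keep the chain hidden from user dumps;
 * see tcf_chain_held_by_acts_only() above.
 */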

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding block lock.
         * However, when the block is unlocked the chain can change
         * concurrently, so save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_signal_destroying(chain, tp);
                tp = tp_next;
        }
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
                              flow_indr_block_bind_cb_t *cb, void *cb_priv,
                              enum flow_block_command command, bool ingress)
{
        struct flow_block_offload bo = {
                .command	= command,
                .binder_type	= ingress ?
                                  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
                                  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
                .net		= dev_net(dev),
                .block_shared	= tcf_block_non_null_shared(block),
        };
        INIT_LIST_HEAD(&bo.cb_list);

        if (!block)
                return;

        bo.block = &block->flow_block;

        down_write(&block->cb_lock);
        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

        tcf_block_setup(block, &bo);
        up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
        const struct Qdisc_class_ops *cops;
        const struct Qdisc_ops *ops;
        struct Qdisc *qdisc;

        if (!dev_ingress_queue(dev))
                return NULL;

        qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
        if (!qdisc)
                return NULL;

        ops = qdisc->ops;
        if (!ops)
                return NULL;

        if (!ingress && !strcmp("ingress", ops->id))
                return NULL;

        cops = ops->cl_ops;
        if (!cops)
                return NULL;

        if (!cops->tcf_block)
                return NULL;

        return cops->tcf_block(qdisc,
                               ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
                               NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
                                      flow_indr_block_bind_cb_t *cb,
                                      void *cb_priv,
                                      enum flow_block_command command)
{
        struct tcf_block *block;

        block = tc_dev_block(dev, true);
        tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

        block = tc_dev_block(dev, false);
        tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
                               struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               enum flow_block_command command,
                               struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {
                .command	= command,
                .binder_type	= ei->binder_type,
                .net		= dev_net(dev),
                .block		= &block->flow_block,
                .block_shared	= tcf_block_shared(block),
                .extack		= extack,
        };
        INIT_LIST_HEAD(&bo.cb_list);

        flow_indr_block_call(dev, &bo, command);
        tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};
        int err;

        bo.net = dev_net(dev);
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = &block->flow_block;
        bo.block_shared = tcf_block_shared(block);
        bo.extack = extack;
        INIT_LIST_HEAD(&bo.cb_list);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
        if (err < 0)
                return err;

        return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_inc;

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, refuse to bind.
         */
        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                err = -EOPNOTSUPP;
                goto err_unlock;
        }

        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                goto err_unlock;

        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        up_write(&block->cb_lock);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block)) {
                err = -EOPNOTSUPP;
                goto err_unlock;
        }
        err = 0;
        block->nooffloaddevcnt++;
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
        up_write(&block->cb_lock);
        return err;
}
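
/* Driver-side sketch (illustrative; the "foo" symbols are hypothetical):
 * a NIC driver answers the TC_SETUP_BLOCK command issued above from its
 * ndo_setup_tc(), typically via flow_block_cb_setup_simple():
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  dev, dev, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * Returning -EOPNOTSUPP lands in the no_offload_dev_inc fallback above.
 */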

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
        up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

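        /* Preallocate IDR memory outside the spinlock: the GFP_NOWAIT
         * allocation under idr_lock below can then draw from the per-cpu
         * preload pool instead of failing under memory pressure.
         */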
        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        mutex_init(&block->proto_destroy_lock);
        init_rwsem(&block->cb_lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}
| 980 | |
| 981 | /* Function to be used by all clients that want to iterate over all chains in
| 982 | * a block. It properly obtains block->lock and takes a reference to the chain
| 983 | * before returning it. Users of this function must be tolerant of concurrent
| 984 | * chain insertion/deletion or must ensure that no concurrent chain
| 985 | * modification is possible. Note that no netlink dump callback can guarantee
| 986 | * a consistent dump because the rtnl lock is released each time the skb is
| 987 | * filled with data and sent to user-space.
| 988 | */ |
| 989 | |
| 990 | struct tcf_chain * |
| 991 | tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) |
| 992 | { |
| 993 | struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); |
| 994 | |
| 995 | if (chain) |
| 996 | tcf_chain_put(chain); |
| 997 | |
| 998 | return chain_next; |
| 999 | } |
| 1000 | EXPORT_SYMBOL(tcf_get_next_chain); |
| 1001 | |
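| | /* Usage sketch (illustrative only, mirroring the loop in
| |  * tcf_block_flush_all_chains() below): iteration starts from a NULL chain,
| |  * and each call drops the reference taken for the previous one:
| |  *
| |  *	struct tcf_chain *chain;
| |  *
| |  *	for (chain = tcf_get_next_chain(block, NULL);
| |  *	     chain;
| |  *	     chain = tcf_get_next_chain(block, chain))
| |  *		use(chain);	('use' stands for arbitrary caller code)
| |  */
| |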
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1002 | static struct tcf_proto * |
| 1003 | __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) |
| 1004 | { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1005 | u32 prio = 0; |
| 1006 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1007 | ASSERT_RTNL(); |
| 1008 | mutex_lock(&chain->filter_chain_lock); |
| 1009 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1010 | if (!tp) { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1011 | tp = tcf_chain_dereference(chain->filter_chain, chain); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1012 | } else if (tcf_proto_is_deleting(tp)) { |
| 1013 | /* 'deleting' flag is set and chain->filter_chain_lock was |
| 1014 | * unlocked, which means next pointer could be invalid. Restart |
| 1015 | * search. |
| 1016 | */ |
| 1017 | prio = tp->prio + 1; |
| 1018 | tp = tcf_chain_dereference(chain->filter_chain, chain); |
| 1019 | |
| 1020 | for (; tp; tp = tcf_chain_dereference(tp->next, chain)) |
| 1021 | if (!tp->deleting && tp->prio >= prio) |
| 1022 | break; |
| 1023 | } else { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1024 | tp = tcf_chain_dereference(tp->next, chain); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1025 | } |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1026 | |
| 1027 | if (tp) |
| 1028 | tcf_proto_get(tp); |
| 1029 | |
| 1030 | mutex_unlock(&chain->filter_chain_lock); |
| 1031 | |
| 1032 | return tp; |
| 1033 | } |
| 1034 | |
| 1035 | /* Function to be used by all clients that want to iterate over all tp's on a
| 1036 | * chain. Users of this function must be tolerant of concurrent tp
| 1037 | * insertion/deletion or must ensure that no concurrent chain modification is
| 1038 | * possible. Note that no netlink dump callback can guarantee a consistent
| 1039 | * dump because the rtnl lock is released each time the skb is filled with
| 1040 | * data and sent to user-space.
| 1041 | */ |
| 1042 | |
| 1043 | struct tcf_proto * |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1044 | tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp, |
| 1045 | bool rtnl_held) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1046 | { |
| 1047 | struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp); |
| 1048 | |
| 1049 | if (tp) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1050 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1051 | |
| 1052 | return tp_next; |
| 1053 | } |
| 1054 | EXPORT_SYMBOL(tcf_get_next_proto); |
| 1055 | |
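| | /* Usage sketch (illustrative only, mirroring tfilter_notify_chain() further
| |  * below):
| |  *
| |  *	struct tcf_proto *tp;
| |  *
| |  *	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
| |  *	     tp;
| |  *	     tp = tcf_get_next_proto(chain, tp, rtnl_held))
| |  *		use(tp);	('use' stands for arbitrary caller code)
| |  */
| |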
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1056 | static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1057 | { |
| 1058 | struct tcf_chain *chain; |
| 1059 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1060 | /* Last reference to block. At this point chains cannot be added or |
| 1061 | * removed concurrently. |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1062 | */ |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1063 | for (chain = tcf_get_next_chain(block, NULL); |
| 1064 | chain; |
| 1065 | chain = tcf_get_next_chain(block, chain)) { |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1066 | tcf_chain_put_explicitly_created(chain); |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1067 | tcf_chain_flush(chain, rtnl_held); |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 1068 | } |
| 1069 | } |
| 1070 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1071 | /* Look up the Qdisc and increment its reference counter.
| 1072 | * Set the parent, if necessary.
| 1073 | */ |
| 1074 | |
| 1075 | static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, |
| 1076 | u32 *parent, int ifindex, bool rtnl_held, |
| 1077 | struct netlink_ext_ack *extack) |
| 1078 | { |
| 1079 | const struct Qdisc_class_ops *cops; |
| 1080 | struct net_device *dev; |
| 1081 | int err = 0; |
| 1082 | |
| 1083 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
| 1084 | return 0; |
| 1085 | |
| 1086 | rcu_read_lock(); |
| 1087 | |
| 1088 | /* Find link */ |
| 1089 | dev = dev_get_by_index_rcu(net, ifindex); |
| 1090 | if (!dev) { |
| 1091 | rcu_read_unlock(); |
| 1092 | return -ENODEV; |
| 1093 | } |
| 1094 | |
| 1095 | /* Find qdisc */ |
| 1096 | if (!*parent) { |
| 1097 | *q = dev->qdisc; |
| 1098 | *parent = (*q)->handle; |
| 1099 | } else { |
| 1100 | *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); |
| 1101 | if (!*q) { |
| 1102 | NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
| 1103 | err = -EINVAL; |
| 1104 | goto errout_rcu; |
| 1105 | } |
| 1106 | } |
| 1107 | |
| 1108 | *q = qdisc_refcount_inc_nz(*q); |
| 1109 | if (!*q) { |
| 1110 | NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
| 1111 | err = -EINVAL; |
| 1112 | goto errout_rcu; |
| 1113 | } |
| 1114 | |
| 1115 | /* Is it classful? */ |
| 1116 | cops = (*q)->ops->cl_ops; |
| 1117 | if (!cops) { |
| 1118 | NL_SET_ERR_MSG(extack, "Qdisc not classful"); |
| 1119 | err = -EINVAL; |
| 1120 | goto errout_qdisc; |
| 1121 | } |
| 1122 | |
| 1123 | if (!cops->tcf_block) { |
| 1124 | NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); |
| 1125 | err = -EOPNOTSUPP; |
| 1126 | goto errout_qdisc; |
| 1127 | } |
| 1128 | |
| 1129 | errout_rcu: |
| 1130 | /* At this point we know that the qdisc is not noop_qdisc,
| 1131 | * which means that the qdisc holds a reference to the net_device
| 1132 | * and we hold a reference to the qdisc, so it is safe to release
| 1133 | * the rcu read lock.
| 1134 | */ |
| 1135 | rcu_read_unlock(); |
| 1136 | return err; |
| 1137 | |
| 1138 | errout_qdisc: |
| 1139 | rcu_read_unlock(); |
| 1140 | |
| 1141 | if (rtnl_held) |
| 1142 | qdisc_put(*q); |
| 1143 | else |
| 1144 | qdisc_put_unlocked(*q); |
| 1145 | *q = NULL; |
| 1146 | |
| 1147 | return err; |
| 1148 | } |
| 1149 | |
| 1150 | static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl, |
| 1151 | int ifindex, struct netlink_ext_ack *extack) |
| 1152 | { |
| 1153 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
| 1154 | return 0; |
| 1155 | |
| 1156 | /* Are we searching for a filter attached to a class? */
| 1157 | if (TC_H_MIN(parent)) { |
| 1158 | const struct Qdisc_class_ops *cops = q->ops->cl_ops; |
| 1159 | |
| 1160 | *cl = cops->find(q, parent); |
| 1161 | if (*cl == 0) { |
| 1162 | NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); |
| 1163 | return -ENOENT; |
| 1164 | } |
| 1165 | } |
| 1166 | |
| 1167 | return 0; |
| 1168 | } |
| 1169 | |
| 1170 | static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q, |
| 1171 | unsigned long cl, int ifindex, |
| 1172 | u32 block_index, |
| 1173 | struct netlink_ext_ack *extack) |
| 1174 | { |
| 1175 | struct tcf_block *block; |
| 1176 | |
| 1177 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
| 1178 | block = tcf_block_refcnt_get(net, block_index); |
| 1179 | if (!block) { |
| 1180 | NL_SET_ERR_MSG(extack, "Block of given index was not found"); |
| 1181 | return ERR_PTR(-EINVAL); |
| 1182 | } |
| 1183 | } else { |
| 1184 | const struct Qdisc_class_ops *cops = q->ops->cl_ops; |
| 1185 | |
| 1186 | block = cops->tcf_block(q, cl, extack); |
| 1187 | if (!block) |
| 1188 | return ERR_PTR(-EINVAL); |
| 1189 | |
| 1190 | if (tcf_block_shared(block)) { |
| 1191 | NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); |
| 1192 | return ERR_PTR(-EOPNOTSUPP); |
| 1193 | } |
| 1194 | |
| 1195 | /* Always take a reference to the block in order to support execution
| 1196 | * of the cls API rules update path without the rtnl lock. The caller
| 1197 | * must release the block when it has finished using it. The 'if'
| 1198 | * branch of this conditional obtains its reference by calling
| 1199 | * tcf_block_refcnt_get().
| 1200 | */ |
| 1201 | refcount_inc(&block->refcnt); |
| 1202 | } |
| 1203 | |
| 1204 | return block; |
| 1205 | } |
| 1206 | |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1207 | static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1208 | struct tcf_block_ext_info *ei, bool rtnl_held) |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1209 | { |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1210 | if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1211 | /* Flushing/putting all chains will cause the block to be
| 1212 | * deallocated when the last chain is freed. However, if chain_list
| 1213 | * is empty, the block has to be manually deallocated. After the block
| 1214 | * reference counter has reached 0, it is no longer possible to
| 1215 | * increment it or add new chains to the block.
| 1216 | */ |
| 1217 | bool free_block = list_empty(&block->chain_list); |
| 1218 | |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1219 | mutex_unlock(&block->lock); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1220 | if (tcf_block_shared(block)) |
| 1221 | tcf_block_remove(block, block->net); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1222 | |
| 1223 | if (q) |
| 1224 | tcf_block_offload_unbind(block, q, ei); |
| 1225 | |
| 1226 | if (free_block) |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1227 | tcf_block_destroy(block); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1228 | else |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1229 | tcf_block_flush_all_chains(block, rtnl_held); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1230 | } else if (q) { |
| 1231 | tcf_block_offload_unbind(block, q, ei); |
| 1232 | } |
| 1233 | } |
| 1234 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1235 | static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1236 | { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1237 | __tcf_block_put(block, NULL, NULL, rtnl_held); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1238 | } |
| 1239 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1240 | /* Find tcf block. |
| 1241 | * Set q, parent, cl when appropriate. |
| 1242 | */ |
| 1243 | |
| 1244 | static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, |
| 1245 | u32 *parent, unsigned long *cl, |
| 1246 | int ifindex, u32 block_index, |
| 1247 | struct netlink_ext_ack *extack) |
| 1248 | { |
| 1249 | struct tcf_block *block; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1250 | int err = 0; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1251 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1252 | ASSERT_RTNL(); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1253 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1254 | err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); |
| 1255 | if (err) |
| 1256 | goto errout; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1257 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1258 | err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); |
| 1259 | if (err) |
| 1260 | goto errout_qdisc; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1261 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1262 | block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); |
Dan Carpenter | af736bf | 2019-02-18 12:26:32 +0300 | [diff] [blame] | 1263 | if (IS_ERR(block)) { |
| 1264 | err = PTR_ERR(block); |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1265 | goto errout_qdisc; |
Dan Carpenter | af736bf | 2019-02-18 12:26:32 +0300 | [diff] [blame] | 1266 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1267 | |
| 1268 | return block; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1269 | |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1270 | errout_qdisc: |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1271 | if (*q) |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1272 | qdisc_put(*q); |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1273 | errout: |
| 1274 | *q = NULL; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1275 | return ERR_PTR(err); |
| 1276 | } |
| 1277 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1278 | static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, |
| 1279 | bool rtnl_held) |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1280 | { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1281 | if (!IS_ERR_OR_NULL(block)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1282 | tcf_block_refcnt_put(block, rtnl_held); |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1283 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 1284 | if (q) { |
| 1285 | if (rtnl_held) |
| 1286 | qdisc_put(q); |
| 1287 | else |
| 1288 | qdisc_put_unlocked(q); |
| 1289 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1290 | } |
| 1291 | |
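| | /* Usage sketch (illustrative only): update handlers that hold rtnl pair
| |  * tcf_block_find() with tcf_block_release(), which also drops the Qdisc
| |  * reference taken by __tcf_qdisc_find():
| |  *
| |  *	block = tcf_block_find(net, &q, &parent, &cl,
| |  *			       t->tcm_ifindex, t->tcm_block_index, extack);
| |  *	if (IS_ERR(block))
| |  *		return PTR_ERR(block);
| |  *	...
| |  *	tcf_block_release(q, block, true);
| |  */
| |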
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1292 | struct tcf_block_owner_item { |
| 1293 | struct list_head list; |
| 1294 | struct Qdisc *q; |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1295 | enum flow_block_binder_type binder_type; |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1296 | }; |
| 1297 | |
| 1298 | static void |
| 1299 | tcf_block_owner_netif_keep_dst(struct tcf_block *block, |
| 1300 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1301 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1302 | { |
| 1303 | if (block->keep_dst && |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1304 | binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && |
| 1305 | binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1306 | netif_keep_dst(qdisc_dev(q)); |
| 1307 | } |
| 1308 | |
| 1309 | void tcf_block_netif_keep_dst(struct tcf_block *block) |
| 1310 | { |
| 1311 | struct tcf_block_owner_item *item; |
| 1312 | |
| 1313 | block->keep_dst = true; |
| 1314 | list_for_each_entry(item, &block->owner_list, list) |
| 1315 | tcf_block_owner_netif_keep_dst(block, item->q, |
| 1316 | item->binder_type); |
| 1317 | } |
| 1318 | EXPORT_SYMBOL(tcf_block_netif_keep_dst); |
| 1319 | |
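| | /* Usage note (illustrative): classifiers whose match may depend on the skb
| |  * dst entry (e.g. cls_route) call this from their configuration path:
| |  *
| |  *	tcf_block_netif_keep_dst(tp->chain->block);
| |  */
| |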
| 1320 | static int tcf_block_owner_add(struct tcf_block *block, |
| 1321 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1322 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1323 | { |
| 1324 | struct tcf_block_owner_item *item; |
| 1325 | |
| 1326 | item = kmalloc(sizeof(*item), GFP_KERNEL); |
| 1327 | if (!item) |
| 1328 | return -ENOMEM; |
| 1329 | item->q = q; |
| 1330 | item->binder_type = binder_type; |
| 1331 | list_add(&item->list, &block->owner_list); |
| 1332 | return 0; |
| 1333 | } |
| 1334 | |
| 1335 | static void tcf_block_owner_del(struct tcf_block *block, |
| 1336 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1337 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1338 | { |
| 1339 | struct tcf_block_owner_item *item; |
| 1340 | |
| 1341 | list_for_each_entry(item, &block->owner_list, list) { |
| 1342 | if (item->q == q && item->binder_type == binder_type) { |
| 1343 | list_del(&item->list); |
| 1344 | kfree(item); |
| 1345 | return; |
| 1346 | } |
| 1347 | } |
| 1348 | WARN_ON(1); |
| 1349 | } |
| 1350 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1351 | int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, |
| 1352 | struct tcf_block_ext_info *ei, |
| 1353 | struct netlink_ext_ack *extack) |
| 1354 | { |
| 1355 | struct net *net = qdisc_net(q); |
| 1356 | struct tcf_block *block = NULL; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1357 | int err; |
| 1358 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1359 | if (ei->block_index) |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1360 | /* A non-zero block_index means a shared block is requested */
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1361 | block = tcf_block_refcnt_get(net, ei->block_index); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1362 | |
| 1363 | if (!block) { |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 1364 | block = tcf_block_create(net, q, ei->block_index, extack); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1365 | if (IS_ERR(block)) |
| 1366 | return PTR_ERR(block); |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 1367 | if (tcf_block_shared(block)) { |
| 1368 | err = tcf_block_insert(block, net, extack); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1369 | if (err) |
| 1370 | goto err_block_insert; |
| 1371 | } |
| 1372 | } |
| 1373 | |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1374 | err = tcf_block_owner_add(block, q, ei->binder_type); |
| 1375 | if (err) |
| 1376 | goto err_block_owner_add; |
| 1377 | |
| 1378 | tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); |
| 1379 | |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1380 | err = tcf_chain0_head_change_cb_add(block, ei, extack); |
Jiri Pirko | a9b1944 | 2018-01-17 11:46:45 +0100 | [diff] [blame] | 1381 | if (err) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1382 | goto err_chain0_head_change_cb_add; |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1383 | |
John Hurley | 60513bd | 2018-06-25 14:30:04 -0700 | [diff] [blame] | 1384 | err = tcf_block_offload_bind(block, q, ei, extack); |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1385 | if (err) |
| 1386 | goto err_block_offload_bind; |
| 1387 | |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1388 | *p_block = block; |
| 1389 | return 0; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1390 | |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1391 | err_block_offload_bind: |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1392 | tcf_chain0_head_change_cb_del(block, ei); |
| 1393 | err_chain0_head_change_cb_add: |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1394 | tcf_block_owner_del(block, q, ei->binder_type); |
| 1395 | err_block_owner_add: |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1396 | err_block_insert: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1397 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1398 | return err; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1399 | } |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1400 | EXPORT_SYMBOL(tcf_block_get_ext); |
| 1401 | |
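| | /* Usage sketch (illustrative, patterned on the clsact/ingress qdiscs):
| |  * a qdisc binds to a block by filling a tcf_block_ext_info first
| |  * ('q' names the qdisc's private data here):
| |  *
| |  *	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
| |  *	q->block_info.chain_head_change = clsact_chain_head_change;
| |  *	q->block_info.chain_head_change_priv = &q->miniqp;
| |  *	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
| |  */
| |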
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1402 | static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) |
| 1403 | { |
| 1404 | struct tcf_proto __rcu **p_filter_chain = priv; |
| 1405 | |
| 1406 | rcu_assign_pointer(*p_filter_chain, tp_head); |
| 1407 | } |
| 1408 | |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1409 | int tcf_block_get(struct tcf_block **p_block, |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 1410 | struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, |
| 1411 | struct netlink_ext_ack *extack) |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1412 | { |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1413 | struct tcf_block_ext_info ei = { |
| 1414 | .chain_head_change = tcf_chain_head_change_dflt, |
| 1415 | .chain_head_change_priv = p_filter_chain, |
| 1416 | }; |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1417 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1418 | WARN_ON(!p_filter_chain); |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 1419 | return tcf_block_get_ext(p_block, q, &ei, extack); |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1420 | } |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1421 | EXPORT_SYMBOL(tcf_block_get); |
| 1422 | |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1423 | /* XXX: Standalone actions are not allowed to jump to any chain, and bound |
Roman Kapl | a60b3f5 | 2017-11-24 12:27:58 +0100 | [diff] [blame] | 1424 | * actions should all be removed after flushing.
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1425 | */ |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1426 | void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, |
David S. Miller | e1ea2f9 | 2017-10-30 14:10:01 +0900 | [diff] [blame] | 1427 | struct tcf_block_ext_info *ei) |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1428 | { |
David S. Miller | c30abd5 | 2017-12-16 22:11:55 -0500 | [diff] [blame] | 1429 | if (!block) |
| 1430 | return; |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1431 | tcf_chain0_head_change_cb_del(block, ei); |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1432 | tcf_block_owner_del(block, q, ei->binder_type); |
Roman Kapl | a60b3f5 | 2017-11-24 12:27:58 +0100 | [diff] [blame] | 1433 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1434 | __tcf_block_put(block, q, ei, true); |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1435 | } |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1436 | EXPORT_SYMBOL(tcf_block_put_ext); |
| 1437 | |
| 1438 | void tcf_block_put(struct tcf_block *block) |
| 1439 | { |
| 1440 | struct tcf_block_ext_info ei = {0, }; |
| 1441 | |
Jiri Pirko | 4853f12 | 2017-12-21 13:13:59 +0100 | [diff] [blame] | 1442 | if (!block) |
| 1443 | return; |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1444 | tcf_block_put_ext(block, block->q, &ei); |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1445 | } |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1447 | EXPORT_SYMBOL(tcf_block_put); |
Jiri Pirko | cf1facd | 2017-02-09 14:38:56 +0100 | [diff] [blame] | 1448 | |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1449 | static int |
Pablo Neira Ayuso | a732331 | 2019-07-19 18:20:15 +0200 | [diff] [blame] | 1450 | tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1451 | void *cb_priv, bool add, bool offload_in_use, |
| 1452 | struct netlink_ext_ack *extack) |
| 1453 | { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1454 | struct tcf_chain *chain, *chain_prev; |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1455 | struct tcf_proto *tp, *tp_prev; |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1456 | int err; |
| 1457 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1458 | lockdep_assert_held(&block->cb_lock); |
| 1459 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1460 | for (chain = __tcf_get_next_chain(block, NULL); |
| 1461 | chain; |
| 1462 | chain_prev = chain, |
| 1463 | chain = __tcf_get_next_chain(block, chain), |
| 1464 | tcf_chain_put(chain_prev)) { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1465 | for (tp = __tcf_get_next_proto(chain, NULL); tp; |
| 1466 | tp_prev = tp, |
| 1467 | tp = __tcf_get_next_proto(chain, tp), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1468 | tcf_proto_put(tp_prev, true, NULL)) { |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1469 | if (tp->ops->reoffload) { |
| 1470 | err = tp->ops->reoffload(tp, add, cb, cb_priv, |
| 1471 | extack); |
| 1472 | if (err && add) |
| 1473 | goto err_playback_remove; |
| 1474 | } else if (add && offload_in_use) { |
| 1475 | err = -EOPNOTSUPP; |
| 1476 | NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); |
| 1477 | goto err_playback_remove; |
| 1478 | } |
| 1479 | } |
| 1480 | } |
| 1481 | |
| 1482 | return 0; |
| 1483 | |
| 1484 | err_playback_remove: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1485 | tcf_proto_put(tp, true, NULL); |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1486 | tcf_chain_put(chain); |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1487 | tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, |
| 1488 | extack); |
| 1489 | return err; |
| 1490 | } |
| 1491 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1492 | static int tcf_block_bind(struct tcf_block *block, |
| 1493 | struct flow_block_offload *bo) |
| 1494 | { |
| 1495 | struct flow_block_cb *block_cb, *next; |
| 1496 | int err, i = 0; |
| 1497 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1498 | lockdep_assert_held(&block->cb_lock); |
| 1499 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1500 | list_for_each_entry(block_cb, &bo->cb_list, list) { |
| 1501 | err = tcf_block_playback_offloads(block, block_cb->cb, |
| 1502 | block_cb->cb_priv, true, |
| 1503 | tcf_block_offload_in_use(block), |
| 1504 | bo->extack); |
| 1505 | if (err) |
| 1506 | goto err_unroll; |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame] | 1507 | if (!bo->unlocked_driver_cb) |
| 1508 | block->lockeddevcnt++; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1509 | |
| 1510 | i++; |
| 1511 | } |
Pablo Neira Ayuso | 14bfb13 | 2019-07-19 18:20:16 +0200 | [diff] [blame] | 1512 | list_splice(&bo->cb_list, &block->flow_block.cb_list); |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1513 | |
| 1514 | return 0; |
| 1515 | |
| 1516 | err_unroll: |
| 1517 | list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { |
| 1518 | if (i-- > 0) { |
| 1519 | list_del(&block_cb->list); |
| 1520 | tcf_block_playback_offloads(block, block_cb->cb, |
| 1521 | block_cb->cb_priv, false, |
| 1522 | tcf_block_offload_in_use(block), |
| 1523 | NULL); |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame] | 1524 | if (!bo->unlocked_driver_cb) |
| 1525 | block->lockeddevcnt--; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1526 | } |
| 1527 | flow_block_cb_free(block_cb); |
| 1528 | } |
| 1529 | |
| 1530 | return err; |
| 1531 | } |
| 1532 | |
| 1533 | static void tcf_block_unbind(struct tcf_block *block, |
| 1534 | struct flow_block_offload *bo) |
| 1535 | { |
| 1536 | struct flow_block_cb *block_cb, *next; |
| 1537 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1538 | lockdep_assert_held(&block->cb_lock); |
| 1539 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1540 | list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { |
| 1541 | tcf_block_playback_offloads(block, block_cb->cb, |
| 1542 | block_cb->cb_priv, false, |
| 1543 | tcf_block_offload_in_use(block), |
| 1544 | NULL); |
| 1545 | list_del(&block_cb->list); |
| 1546 | flow_block_cb_free(block_cb); |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame] | 1547 | if (!bo->unlocked_driver_cb) |
| 1548 | block->lockeddevcnt--; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1549 | } |
| 1550 | } |
| 1551 | |
| 1552 | static int tcf_block_setup(struct tcf_block *block, |
| 1553 | struct flow_block_offload *bo) |
| 1554 | { |
| 1555 | int err; |
| 1556 | |
| 1557 | switch (bo->command) { |
| 1558 | case FLOW_BLOCK_BIND: |
| 1559 | err = tcf_block_bind(block, bo); |
| 1560 | break; |
| 1561 | case FLOW_BLOCK_UNBIND: |
| 1562 | err = 0; |
| 1563 | tcf_block_unbind(block, bo); |
| 1564 | break; |
| 1565 | default: |
| 1566 | WARN_ON_ONCE(1); |
| 1567 | err = -EOPNOTSUPP; |
| 1568 | } |
| 1569 | |
| 1570 | return err; |
| 1571 | } |
| 1572 | |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1573 | /* Main classifier routine: scans the classifier chain attached
| 1574 | * to this qdisc, (optionally) tests for the protocol and asks the
| 1575 | * specific classifiers.
| 1576 | */ |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1577 | static inline int __tcf_classify(struct sk_buff *skb, |
| 1578 | const struct tcf_proto *tp, |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1579 | const struct tcf_proto *orig_tp, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1580 | struct tcf_result *res, |
| 1581 | bool compat_mode, |
| 1582 | u32 *last_executed_chain) |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1583 | { |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1584 | #ifdef CONFIG_NET_CLS_ACT |
| 1585 | const int max_reclassify_loop = 4; |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1586 | const struct tcf_proto *first_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1587 | int limit = 0; |
| 1588 | |
| 1589 | reclassify: |
| 1590 | #endif |
| 1591 | for (; tp; tp = rcu_dereference_bh(tp->next)) { |
Cong Wang | cd0c4e7 | 2019-01-11 18:55:42 -0800 | [diff] [blame] | 1592 | __be16 protocol = tc_skb_protocol(skb); |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1593 | int err; |
| 1594 | |
| 1595 | if (tp->protocol != protocol && |
| 1596 | tp->protocol != htons(ETH_P_ALL)) |
| 1597 | continue; |
| 1598 | |
| 1599 | err = tp->classify(skb, tp, res); |
| 1600 | #ifdef CONFIG_NET_CLS_ACT |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1601 | if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1602 | first_tp = orig_tp; |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1603 | *last_executed_chain = first_tp->chain->index; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1604 | goto reset; |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1605 | } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1606 | first_tp = res->goto_tp; |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1607 | *last_executed_chain = err & TC_ACT_EXT_VAL_MASK; |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1608 | goto reset; |
| 1609 | } |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1610 | #endif |
| 1611 | if (err >= 0) |
| 1612 | return err; |
| 1613 | } |
| 1614 | |
| 1615 | return TC_ACT_UNSPEC; /* signal: continue lookup */ |
| 1616 | #ifdef CONFIG_NET_CLS_ACT |
| 1617 | reset: |
| 1618 | if (unlikely(limit++ >= max_reclassify_loop)) { |
Jiri Pirko | 9d3aaff | 2018-01-17 11:46:47 +0100 | [diff] [blame] | 1619 | net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n", |
| 1620 | tp->chain->block->index, |
| 1621 | tp->prio & 0xffff, |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1622 | ntohs(tp->protocol)); |
| 1623 | return TC_ACT_SHOT; |
| 1624 | } |
| 1625 | |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1626 | tp = first_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1627 | goto reclassify; |
| 1628 | #endif |
| 1629 | } |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1630 | |
| 1631 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
| 1632 | struct tcf_result *res, bool compat_mode) |
| 1633 | { |
| 1634 | u32 last_executed_chain = 0; |
| 1635 | |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1636 | return __tcf_classify(skb, tp, tp, res, compat_mode, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1637 | &last_executed_chain); |
| 1638 | } |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1639 | EXPORT_SYMBOL(tcf_classify); |
| 1640 | |
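| | /* Usage sketch (hypothetical caller state 'q'): a qdisc classifies a packet
| |  * against its filter list and acts on the verdict:
| |  *
| |  *	struct tcf_result res;
| |  *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
| |  *
| |  *	switch (tcf_classify(skb, fl, &res, false)) {
| |  *	case TC_ACT_SHOT:
| |  *		return NULL;	(drop)
| |  *	}
| |  *	(on success, res.classid selects the target class)
| |  */
| |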
Paul Blakey | 7d17c54 | 2020-02-16 12:01:22 +0200 | [diff] [blame] | 1641 | int tcf_classify_ingress(struct sk_buff *skb, |
| 1642 | const struct tcf_block *ingress_block, |
| 1643 | const struct tcf_proto *tp, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1644 | struct tcf_result *res, bool compat_mode) |
| 1645 | { |
| 1646 | #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
| 1647 | u32 last_executed_chain = 0; |
| 1648 | |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1649 | return __tcf_classify(skb, tp, tp, res, compat_mode, |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1650 | &last_executed_chain); |
| 1651 | #else |
| 1652 | u32 last_executed_chain = tp ? tp->chain->index : 0; |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1653 | const struct tcf_proto *orig_tp = tp; |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1654 | struct tc_skb_ext *ext; |
| 1655 | int ret; |
| 1656 | |
Paul Blakey | af69962 | 2020-02-16 12:01:24 +0200 | [diff] [blame] | 1657 | ext = skb_ext_find(skb, TC_SKB_EXT); |
| 1658 | |
| 1659 | if (ext && ext->chain) { |
| 1660 | struct tcf_chain *fchain; |
| 1661 | |
| 1662 | fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain); |
| 1663 | if (!fchain) |
| 1664 | return TC_ACT_SHOT; |
| 1665 | |
| 1666 | /* Consume, so cloned/redirect skbs won't inherit ext */ |
| 1667 | skb_ext_del(skb, TC_SKB_EXT); |
| 1668 | |
| 1669 | tp = rcu_dereference_bh(fchain->filter_chain); |
| 1670 | } |
| 1671 | |
| 1672 | ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, |
| 1673 | &last_executed_chain); |
Paul Blakey | 9410c94 | 2020-02-16 12:01:21 +0200 | [diff] [blame] | 1674 | |
| 1675 | /* If we missed on some chain, record it so classification can resume there */
| 1676 | if (ret == TC_ACT_UNSPEC && last_executed_chain) { |
| 1677 | ext = skb_ext_add(skb, TC_SKB_EXT); |
| 1678 | if (WARN_ON_ONCE(!ext)) |
| 1679 | return TC_ACT_SHOT; |
| 1680 | ext->chain = last_executed_chain; |
| 1681 | } |
| 1682 | |
| 1683 | return ret; |
| 1684 | #endif |
| 1685 | } |
| 1686 | EXPORT_SYMBOL(tcf_classify_ingress); |
| 1687 | |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1688 | struct tcf_chain_info { |
| 1689 | struct tcf_proto __rcu **pprev; |
| 1690 | struct tcf_proto __rcu *next; |
| 1691 | }; |
| 1692 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1693 | static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain, |
| 1694 | struct tcf_chain_info *chain_info) |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1695 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1696 | return tcf_chain_dereference(*chain_info->pprev, chain); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1697 | } |
| 1698 | |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1699 | static int tcf_chain_tp_insert(struct tcf_chain *chain, |
| 1700 | struct tcf_chain_info *chain_info, |
| 1701 | struct tcf_proto *tp) |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1702 | { |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1703 | if (chain->flushing) |
| 1704 | return -EAGAIN; |
| 1705 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1706 | if (*chain_info->pprev == chain->filter_chain) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1707 | tcf_chain0_head_change(chain, tp); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1708 | tcf_proto_get(tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1709 | RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1710 | rcu_assign_pointer(*chain_info->pprev, tp); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1711 | |
| 1712 | return 0; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1713 | } |
| 1714 | |
| 1715 | static void tcf_chain_tp_remove(struct tcf_chain *chain, |
| 1716 | struct tcf_chain_info *chain_info, |
| 1717 | struct tcf_proto *tp) |
| 1718 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1719 | struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1720 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1721 | tcf_proto_mark_delete(tp); |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1722 | if (tp == chain->filter_chain) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1723 | tcf_chain0_head_change(chain, next); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1724 | RCU_INIT_POINTER(*chain_info->pprev, next); |
| 1725 | } |
| 1726 | |
| 1727 | static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, |
| 1728 | struct tcf_chain_info *chain_info, |
| 1729 | u32 protocol, u32 prio, |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1730 | bool prio_allocate); |
| 1731 | |
| 1732 | /* Try to insert a new proto.
| 1733 | * If a proto with the specified priority already exists, free the new
| 1734 | * proto and return the existing one.
| 1735 | */ |
| 1736 | |
| 1737 | static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, |
| 1738 | struct tcf_proto *tp_new, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1739 | u32 protocol, u32 prio, |
| 1740 | bool rtnl_held) |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1741 | { |
| 1742 | struct tcf_chain_info chain_info; |
| 1743 | struct tcf_proto *tp; |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1744 | int err = 0; |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1745 | |
| 1746 | mutex_lock(&chain->filter_chain_lock); |
| 1747 | |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1748 | if (tcf_proto_exists_destroying(chain, tp_new)) { |
| 1749 | mutex_unlock(&chain->filter_chain_lock); |
| 1750 | tcf_proto_destroy(tp_new, rtnl_held, false, NULL); |
| 1751 | return ERR_PTR(-EAGAIN); |
| 1752 | } |
| 1753 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1754 | tp = tcf_chain_tp_find(chain, &chain_info, |
| 1755 | protocol, prio, false); |
| 1756 | if (!tp) |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1757 | err = tcf_chain_tp_insert(chain, &chain_info, tp_new); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1758 | mutex_unlock(&chain->filter_chain_lock); |
| 1759 | |
| 1760 | if (tp) { |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1761 | tcf_proto_destroy(tp_new, rtnl_held, false, NULL); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1762 | tp_new = tp; |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1763 | } else if (err) { |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1764 | tcf_proto_destroy(tp_new, rtnl_held, false, NULL); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1765 | tp_new = ERR_PTR(err); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1766 | } |
| 1767 | |
| 1768 | return tp_new; |
| 1769 | } |
| 1770 | |
| 1771 | static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1772 | struct tcf_proto *tp, bool rtnl_held, |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1773 | struct netlink_ext_ack *extack) |
| 1774 | { |
| 1775 | struct tcf_chain_info chain_info; |
| 1776 | struct tcf_proto *tp_iter; |
| 1777 | struct tcf_proto **pprev; |
| 1778 | struct tcf_proto *next; |
| 1779 | |
| 1780 | mutex_lock(&chain->filter_chain_lock); |
| 1781 | |
| 1782 | /* Atomically find and remove tp from chain. */ |
| 1783 | for (pprev = &chain->filter_chain; |
| 1784 | (tp_iter = tcf_chain_dereference(*pprev, chain)); |
| 1785 | pprev = &tp_iter->next) { |
| 1786 | if (tp_iter == tp) { |
| 1787 | chain_info.pprev = pprev; |
| 1788 | chain_info.next = tp_iter->next; |
| 1789 | WARN_ON(tp_iter->deleting); |
| 1790 | break; |
| 1791 | } |
| 1792 | } |
| 1793 | /* Verify that tp still exists and no new filters were inserted |
| 1794 | * concurrently. |
| 1795 | * Mark tp for deletion if it is empty. |
| 1796 | */ |
Davide Caratti | a5b72a0 | 2019-12-28 16:36:58 +0100 | [diff] [blame] | 1797 | if (!tp_iter || !tcf_proto_check_delete(tp)) { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1798 | mutex_unlock(&chain->filter_chain_lock); |
| 1799 | return; |
| 1800 | } |
| 1801 | |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 1802 | tcf_proto_signal_destroying(chain, tp); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1803 | next = tcf_chain_dereference(chain_info.next, chain); |
| 1804 | if (tp == chain->filter_chain) |
| 1805 | tcf_chain0_head_change(chain, next); |
| 1806 | RCU_INIT_POINTER(*chain_info.pprev, next); |
| 1807 | mutex_unlock(&chain->filter_chain_lock); |
| 1808 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1809 | tcf_proto_put(tp, rtnl_held, extack); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1810 | } |
| 1811 | |
| 1812 | static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, |
| 1813 | struct tcf_chain_info *chain_info, |
| 1814 | u32 protocol, u32 prio, |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1815 | bool prio_allocate) |
| 1816 | { |
| 1817 | struct tcf_proto **pprev; |
| 1818 | struct tcf_proto *tp; |
| 1819 | |
| 1820 | /* Check the chain for an existing tcf proto with this priority */
| 1821 | for (pprev = &chain->filter_chain; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1822 | (tp = tcf_chain_dereference(*pprev, chain)); |
| 1823 | pprev = &tp->next) { |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1824 | if (tp->prio >= prio) { |
| 1825 | if (tp->prio == prio) { |
| 1826 | if (prio_allocate || |
| 1827 | (tp->protocol != protocol && protocol)) |
| 1828 | return ERR_PTR(-EINVAL); |
| 1829 | } else { |
| 1830 | tp = NULL; |
| 1831 | } |
| 1832 | break; |
| 1833 | } |
| 1834 | } |
| 1835 | chain_info->pprev = pprev; |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1836 | if (tp) { |
| 1837 | chain_info->next = tp->next; |
| 1838 | tcf_proto_get(tp); |
| 1839 | } else { |
| 1840 | chain_info->next = NULL; |
| 1841 | } |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1842 | return tp; |
| 1843 | } |
| 1844 | |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1845 | static int tcf_fill_node(struct net *net, struct sk_buff *skb, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1846 | struct tcf_proto *tp, struct tcf_block *block, |
| 1847 | struct Qdisc *q, u32 parent, void *fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1848 | u32 portid, u32 seq, u16 flags, int event, |
| 1849 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1850 | { |
| 1851 | struct tcmsg *tcm; |
| 1852 | struct nlmsghdr *nlh; |
| 1853 | unsigned char *b = skb_tail_pointer(skb); |
| 1854 | |
| 1855 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); |
| 1856 | if (!nlh) |
| 1857 | goto out_nlmsg_trim; |
| 1858 | tcm = nlmsg_data(nlh); |
| 1859 | tcm->tcm_family = AF_UNSPEC; |
| 1860 | tcm->tcm__pad1 = 0; |
| 1861 | tcm->tcm__pad2 = 0; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1862 | if (q) { |
| 1863 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
| 1864 | tcm->tcm_parent = parent; |
| 1865 | } else { |
| 1866 | tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; |
| 1867 | tcm->tcm_block_index = block->index; |
| 1868 | } |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1869 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); |
| 1870 | if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) |
| 1871 | goto nla_put_failure; |
| 1872 | if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) |
| 1873 | goto nla_put_failure; |
| 1874 | if (!fh) { |
| 1875 | tcm->tcm_handle = 0; |
| 1876 | } else { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1877 | if (tp->ops->dump && |
| 1878 | tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1879 | goto nla_put_failure; |
| 1880 | } |
| 1881 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
| 1882 | return skb->len; |
| 1883 | |
| 1884 | out_nlmsg_trim: |
| 1885 | nla_put_failure: |
| 1886 | nlmsg_trim(skb, b); |
| 1887 | return -1; |
| 1888 | } |
| 1889 | |
| 1890 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
| 1891 | struct nlmsghdr *n, struct tcf_proto *tp, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1892 | struct tcf_block *block, struct Qdisc *q, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1893 | u32 parent, void *fh, int event, bool unicast, |
| 1894 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1895 | { |
| 1896 | struct sk_buff *skb; |
| 1897 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1898 | int err = 0; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1899 | |
| 1900 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 1901 | if (!skb) |
| 1902 | return -ENOBUFS; |
| 1903 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1904 | if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1905 | n->nlmsg_seq, n->nlmsg_flags, event, |
| 1906 | rtnl_held) <= 0) { |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1907 | kfree_skb(skb); |
| 1908 | return -EINVAL; |
| 1909 | } |
| 1910 | |
| 1911 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1912 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 1913 | else |
| 1914 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 1915 | n->nlmsg_flags & NLM_F_ECHO); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1916 | |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1917 | if (err > 0) |
| 1918 | err = 0; |
| 1919 | return err; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1920 | } |
| 1921 | |
| 1922 | static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, |
| 1923 | struct nlmsghdr *n, struct tcf_proto *tp, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1924 | struct tcf_block *block, struct Qdisc *q, |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1925 | u32 parent, void *fh, bool unicast, bool *last, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1926 | bool rtnl_held, struct netlink_ext_ack *extack) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1927 | { |
| 1928 | struct sk_buff *skb; |
| 1929 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 1930 | int err; |
| 1931 | |
| 1932 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 1933 | if (!skb) |
| 1934 | return -ENOBUFS; |
| 1935 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1936 | if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1937 | n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, |
| 1938 | rtnl_held) <= 0) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1939 | NL_SET_ERR_MSG(extack, "Failed to build del event notification"); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1940 | kfree_skb(skb); |
| 1941 | return -EINVAL; |
| 1942 | } |
| 1943 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1944 | err = tp->ops->delete(tp, fh, last, rtnl_held, extack); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1945 | if (err) { |
| 1946 | kfree_skb(skb); |
| 1947 | return err; |
| 1948 | } |
| 1949 | |
| 1950 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1951 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 1952 | else |
| 1953 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 1954 | n->nlmsg_flags & NLM_F_ECHO); |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1955 | if (err < 0) |
| 1956 | NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1957 | |
| 1958 | if (err > 0) |
| 1959 | err = 0; |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1960 | return err; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1961 | } |
| 1962 | |
| 1963 | static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1964 | struct tcf_block *block, struct Qdisc *q, |
| 1965 | u32 parent, struct nlmsghdr *n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1966 | struct tcf_chain *chain, int event, |
| 1967 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1968 | { |
| 1969 | struct tcf_proto *tp; |
| 1970 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1971 | for (tp = tcf_get_next_proto(chain, NULL, rtnl_held); |
| 1972 | tp; tp = tcf_get_next_proto(chain, tp, rtnl_held)) |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1973 | tfilter_notify(net, oskb, n, tp, block, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1974 | q, parent, NULL, event, false, rtnl_held); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1975 | } |
| 1976 | |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 1977 | static void tfilter_put(struct tcf_proto *tp, void *fh) |
| 1978 | { |
| 1979 | if (tp->ops->put && fh) |
| 1980 | tp->ops->put(tp, fh); |
| 1981 | } |
| 1982 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1983 | static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
David Ahern | c21ef3e | 2017-04-16 09:48:24 -0700 | [diff] [blame] | 1984 | struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1985 | { |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 1986 | struct net *net = sock_net(skb->sk); |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 1987 | struct nlattr *tca[TCA_MAX + 1]; |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 1988 | char name[IFNAMSIZ]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 | struct tcmsg *t; |
| 1990 | u32 protocol; |
| 1991 | u32 prio; |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 1992 | bool prio_allocate; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1993 | u32 parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1994 | u32 chain_index; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1995 | struct Qdisc *q = NULL; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1996 | struct tcf_chain_info chain_info; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1997 | struct tcf_chain *chain = NULL; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1998 | struct tcf_block *block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | struct tcf_proto *tp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2000 | unsigned long cl; |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2001 | void *fh; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2002 | int err; |
Daniel Borkmann | 628185c | 2016-12-21 18:04:11 +0100 | [diff] [blame] | 2003 | int tp_created; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2004 | bool rtnl_held = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2005 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2006 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
Eric W. Biederman | dfc47ef | 2012-11-16 03:03:00 +0000 | [diff] [blame] | 2007 | return -EPERM; |
Hong zhi guo | de179c8 | 2013-03-25 17:36:33 +0000 | [diff] [blame] | 2008 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2009 | replay: |
Daniel Borkmann | 628185c | 2016-12-21 18:04:11 +0100 | [diff] [blame] | 2010 | tp_created = 0; |
| 2011 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2012 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2013 | rtm_tca_policy, extack); |
Hong zhi guo | de179c8 | 2013-03-25 17:36:33 +0000 | [diff] [blame] | 2014 | if (err < 0) |
| 2015 | return err; |
| 2016 | |
David S. Miller | 942b816 | 2012-06-26 21:48:50 -0700 | [diff] [blame] | 2017 | t = nlmsg_data(n); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | protocol = TC_H_MIN(t->tcm_info); |
| 2019 | prio = TC_H_MAJ(t->tcm_info); |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 2020 | prio_allocate = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2021 | parent = t->tcm_parent; |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2022 | tp = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | cl = 0; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2024 | block = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | |
| 2026 | if (prio == 0) { |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2027 | /* If no priority is provided by the user, |
| 2028 | * we allocate one. |
| 2029 | */ |
| 2030 | if (n->nlmsg_flags & NLM_F_CREATE) { |
| 2031 | prio = TC_H_MAKE(0x80000000U, 0U); |
| 2032 | prio_allocate = true; |
| 2033 | } else { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2034 | NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 | return -ENOENT; |
Daniel Borkmann | ea7f827 | 2016-06-10 23:10:22 +0200 | [diff] [blame] | 2036 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 | } |
| 2038 | |
| 2039 | /* Find head of filter chain. */ |
| 2040 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2041 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2042 | if (err) |
| 2043 | return err; |
| 2044 | |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2045 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2046 | NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); |
| 2047 | err = -EINVAL; |
| 2048 | goto errout; |
| 2049 | } |
| 2050 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2051 | /* Take the rtnl mutex if rtnl_held was set to true on a previous iteration, |
 | 2052 | * the block is shared (no qdisc found), the qdisc does not support unlocked |
 | 2053 | * execution, the classifier type is not specified, or the classifier does |
 | 2054 | * not support unlocked execution. */ |
| 2055 | if (rtnl_held || |
| 2056 | (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2057 | !tcf_proto_is_unlocked(name)) { |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2058 | rtnl_held = true; |
| 2059 | rtnl_lock(); |
| 2060 | } |
| 2061 | |
| 2062 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2063 | if (err) |
| 2064 | goto errout; |
| 2065 | |
| 2066 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2067 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2068 | if (IS_ERR(block)) { |
| 2069 | err = PTR_ERR(block); |
| 2070 | goto errout; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2071 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2072 | |
| 2073 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2074 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2075 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2076 | err = -EINVAL; |
| 2077 | goto errout; |
| 2078 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2079 | chain = tcf_chain_get(block, chain_index, true); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2080 | if (!chain) { |
Jiri Pirko | d5ed72a | 2018-08-27 20:58:43 +0200 | [diff] [blame] | 2081 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2082 | err = -ENOMEM; |
Daniel Borkmann | ea7f827 | 2016-06-10 23:10:22 +0200 | [diff] [blame] | 2083 | goto errout; |
| 2084 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2085 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2086 | mutex_lock(&chain->filter_chain_lock); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 2087 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2088 | prio, prio_allocate); |
| 2089 | if (IS_ERR(tp)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2090 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 2091 | err = PTR_ERR(tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2092 | goto errout_locked; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2093 | } |
| 2094 | |
| 2095 | if (tp == NULL) { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2096 | struct tcf_proto *tp_new = NULL; |
| 2097 | |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2098 | if (chain->flushing) { |
| 2099 | err = -EAGAIN; |
| 2100 | goto errout_locked; |
| 2101 | } |
| 2102 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2103 | /* Proto-tcf does not exist, create a new one */ |
| 2104 | |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2105 | if (tca[TCA_KIND] == NULL || !protocol) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2106 | NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2107 | err = -EINVAL; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2108 | goto errout_locked; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2109 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2110 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2111 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2112 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2113 | err = -ENOENT; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2114 | goto errout_locked; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2115 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 2117 | if (prio_allocate) |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2118 | prio = tcf_auto_prio(tcf_chain_tp_prev(chain, |
| 2119 | &chain_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2120 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2121 | mutex_unlock(&chain->filter_chain_lock); |
Eric Dumazet | 36d79af | 2020-01-21 11:02:20 -0800 | [diff] [blame] | 2122 | tp_new = tcf_proto_create(name, protocol, prio, chain, |
| 2123 | rtnl_held, extack); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2124 | if (IS_ERR(tp_new)) { |
| 2125 | err = PTR_ERR(tp_new); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2126 | goto errout_tp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2127 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2128 | |
Minoru Usui | 12186be | 2009-06-02 02:17:34 -0700 | [diff] [blame] | 2129 | tp_created = 1; |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2130 | tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, |
| 2131 | rtnl_held); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2132 | if (IS_ERR(tp)) { |
| 2133 | err = PTR_ERR(tp); |
| 2134 | goto errout_tp; |
| 2135 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2136 | } else { |
| 2137 | mutex_unlock(&chain->filter_chain_lock); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2138 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2139 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2140 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2141 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2142 | err = -EINVAL; |
| 2143 | goto errout; |
| 2144 | } |
| 2145 | |
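| | /* Look up the target filter by handle; fh is an opaque, |
| | * classifier-private reference (NULL when no filter with this handle |
| | * exists yet). |
| | */ |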
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2146 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2147 | |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2148 | if (!fh) { |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2149 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 2150 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2151 | err = -ENOENT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2152 | goto errout; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 2153 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2154 | } else if (n->nlmsg_flags & NLM_F_EXCL) { |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2155 | tfilter_put(tp, fh); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2156 | NL_SET_ERR_MSG(extack, "Filter already exists"); |
| 2157 | err = -EEXIST; |
| 2158 | goto errout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2159 | } |
| 2160 | |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2161 | if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { |
| 2162 | NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); |
| 2163 | err = -EINVAL; |
| 2164 | goto errout; |
| 2165 | } |
| 2166 | |
Cong Wang | 2f7ef2f | 2014-04-25 13:54:06 -0700 | [diff] [blame] | 2167 | err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, |
Alexander Aring | 7306db3 | 2018-01-18 11:20:51 -0500 | [diff] [blame] | 2168 | n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2169 | rtnl_held, extack); |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2170 | if (err == 0) { |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2171 | tfilter_notify(net, skb, n, tp, block, q, parent, fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2172 | RTM_NEWTFILTER, false, rtnl_held); |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2173 | tfilter_put(tp, fh); |
Vlad Buslov | 503d81d | 2019-07-21 17:44:12 +0300 | [diff] [blame] | 2174 | /* q pointer is NULL for shared blocks */ |
| 2175 | if (q) |
| 2176 | q->flags &= ~TCQ_F_CAN_BYPASS; |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2177 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2178 | |
| 2179 | errout: |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2180 | if (err && tp_created) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2181 | tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2182 | errout_tp: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2183 | if (chain) { |
| 2184 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2185 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2186 | if (!tp_created) |
| 2187 | tcf_chain_put(chain); |
| 2188 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2189 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2190 | |
| 2191 | if (rtnl_held) |
| 2192 | rtnl_unlock(); |
| 2193 | |
| 2194 | if (err == -EAGAIN) { |
| 2195 | /* Take rtnl lock in case EAGAIN is caused by concurrent flush |
| 2196 | * of target chain. |
| 2197 | */ |
| 2198 | rtnl_held = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2199 | /* Replay the request. */ |
| 2200 | goto replay; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2201 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2202 | return err; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2203 | |
| 2204 | errout_locked: |
| 2205 | mutex_unlock(&chain->filter_chain_lock); |
| 2206 | goto errout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2207 | } |
| 2208 | |
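| | /* Handle RTM_DELTFILTER: delete one filter, or flush a whole chain |
| | * when priority is zero. Illustrative iproute2 commands (device name |
| | * is a made-up example): |
| | * |
| | *   tc filter del dev eth0 parent 1: protocol ip prio 10 |
| | *   tc filter del dev eth0 parent 1:	(flushes the whole chain) |
| | */ |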
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2209 | static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| 2210 | struct netlink_ext_ack *extack) |
| 2211 | { |
| 2212 | struct net *net = sock_net(skb->sk); |
| 2213 | struct nlattr *tca[TCA_MAX + 1]; |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2214 | char name[IFNAMSIZ]; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2215 | struct tcmsg *t; |
| 2216 | u32 protocol; |
| 2217 | u32 prio; |
| 2218 | u32 parent; |
| 2219 | u32 chain_index; |
| 2220 | struct Qdisc *q = NULL; |
| 2221 | struct tcf_chain_info chain_info; |
| 2222 | struct tcf_chain *chain = NULL; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2223 | struct tcf_block *block = NULL; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2224 | struct tcf_proto *tp = NULL; |
| 2225 | unsigned long cl = 0; |
| 2226 | void *fh = NULL; |
| 2227 | int err; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2228 | bool rtnl_held = false; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2229 | |
| 2230 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
| 2231 | return -EPERM; |
| 2232 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2233 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2234 | rtm_tca_policy, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2235 | if (err < 0) |
| 2236 | return err; |
| 2237 | |
| 2238 | t = nlmsg_data(n); |
| 2239 | protocol = TC_H_MIN(t->tcm_info); |
| 2240 | prio = TC_H_MAJ(t->tcm_info); |
| 2241 | parent = t->tcm_parent; |
| 2242 | |
| 2243 | if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { |
| 2244 | NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); |
| 2245 | return -ENOENT; |
| 2246 | } |
| 2247 | |
| 2248 | /* Find head of filter chain. */ |
| 2249 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2250 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2251 | if (err) |
| 2252 | return err; |
| 2253 | |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2254 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2255 | NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); |
| 2256 | err = -EINVAL; |
| 2257 | goto errout; |
| 2258 | } |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2259 | /* Take the rtnl mutex if flushing the whole chain, the block is shared |
 | 2260 | * (no qdisc found), the qdisc does not support unlocked execution, the |
 | 2261 | * classifier type is not specified, or the classifier does not support |
 | 2262 | * unlocked execution. */ |
| 2263 | if (!prio || |
| 2264 | (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2265 | !tcf_proto_is_unlocked(name)) { |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2266 | rtnl_held = true; |
| 2267 | rtnl_lock(); |
| 2268 | } |
| 2269 | |
| 2270 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2271 | if (err) |
| 2272 | goto errout; |
| 2273 | |
| 2274 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2275 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2276 | if (IS_ERR(block)) { |
| 2277 | err = PTR_ERR(block); |
| 2278 | goto errout; |
| 2279 | } |
| 2280 | |
| 2281 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2282 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2283 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
| 2284 | err = -EINVAL; |
| 2285 | goto errout; |
| 2286 | } |
| 2287 | chain = tcf_chain_get(block, chain_index, false); |
| 2288 | if (!chain) { |
Jiri Pirko | 5ca8a25 | 2018-08-03 11:08:47 +0200 | [diff] [blame] | 2289 | /* The user requested a flush on a non-existent chain. Nothing to do, |
| 2290 | * so just return success. |
| 2291 | */ |
| 2292 | if (prio == 0) { |
| 2293 | err = 0; |
| 2294 | goto errout; |
| 2295 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2296 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
Jiri Pirko | b7b4247 | 2018-08-27 20:58:44 +0200 | [diff] [blame] | 2297 | err = -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2298 | goto errout; |
| 2299 | } |
| 2300 | |
| 2301 | if (prio == 0) { |
| 2302 | tfilter_notify_chain(net, skb, block, q, parent, n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2303 | chain, RTM_DELTFILTER, rtnl_held); |
| 2304 | tcf_chain_flush(chain, rtnl_held); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2305 | err = 0; |
| 2306 | goto errout; |
| 2307 | } |
| 2308 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2309 | mutex_lock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2310 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2311 | prio, false); |
| 2312 | if (!tp || IS_ERR(tp)) { |
| 2313 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Vlad Buslov | 0e39903 | 2018-06-04 18:32:23 +0300 | [diff] [blame] | 2314 | err = tp ? PTR_ERR(tp) : -ENOENT; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2315 | goto errout_locked; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2316 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2317 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2318 | err = -EINVAL; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2319 | goto errout_locked; |
| 2320 | } else if (t->tcm_handle == 0) { |
John Hurley | 59eb87c | 2019-11-02 14:17:47 +0000 | [diff] [blame] | 2321 | tcf_proto_signal_destroying(chain, tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2322 | tcf_chain_tp_remove(chain, &chain_info, tp); |
| 2323 | mutex_unlock(&chain->filter_chain_lock); |
| 2324 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2325 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2326 | tfilter_notify(net, skb, n, tp, block, q, parent, fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2327 | RTM_DELTFILTER, false, rtnl_held); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2328 | err = 0; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2329 | goto errout; |
| 2330 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2331 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2332 | |
| 2333 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2334 | |
| 2335 | if (!fh) { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2336 | NL_SET_ERR_MSG(extack, "Specified filter handle not found"); |
| 2337 | err = -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2338 | } else { |
| 2339 | bool last; |
| 2340 | |
| 2341 | err = tfilter_del_notify(net, skb, n, tp, block, |
| 2342 | q, parent, fh, false, &last, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2343 | rtnl_held, extack); |
| 2344 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2345 | if (err) |
| 2346 | goto errout; |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2347 | if (last) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2348 | tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2349 | } |
| 2350 | |
| 2351 | errout: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2352 | if (chain) { |
| 2353 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2354 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2355 | tcf_chain_put(chain); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2356 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2357 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2358 | |
| 2359 | if (rtnl_held) |
| 2360 | rtnl_unlock(); |
| 2361 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2362 | return err; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2363 | |
| 2364 | errout_locked: |
| 2365 | mutex_unlock(&chain->filter_chain_lock); |
| 2366 | goto errout; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2367 | } |
| 2368 | |
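| | /* Handle RTM_GETTFILTER: look up a single filter and echo it back to |
| | * the requesting socket, roughly what "tc filter get ..." issues. |
| | * Unlike add/delete, priority and handle must name an existing filter. |
| | */ |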
| 2369 | static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| 2370 | struct netlink_ext_ack *extack) |
| 2371 | { |
| 2372 | struct net *net = sock_net(skb->sk); |
| 2373 | struct nlattr *tca[TCA_MAX + 1]; |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2374 | char name[IFNAMSIZ]; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2375 | struct tcmsg *t; |
| 2376 | u32 protocol; |
| 2377 | u32 prio; |
| 2378 | u32 parent; |
| 2379 | u32 chain_index; |
| 2380 | struct Qdisc *q = NULL; |
| 2381 | struct tcf_chain_info chain_info; |
| 2382 | struct tcf_chain *chain = NULL; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2383 | struct tcf_block *block = NULL; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2384 | struct tcf_proto *tp = NULL; |
| 2385 | unsigned long cl = 0; |
| 2386 | void *fh = NULL; |
| 2387 | int err; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2388 | bool rtnl_held = false; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2389 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2390 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2391 | rtm_tca_policy, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2392 | if (err < 0) |
| 2393 | return err; |
| 2394 | |
| 2395 | t = nlmsg_data(n); |
| 2396 | protocol = TC_H_MIN(t->tcm_info); |
| 2397 | prio = TC_H_MAJ(t->tcm_info); |
| 2398 | parent = t->tcm_parent; |
| 2399 | |
| 2400 | if (prio == 0) { |
| 2401 | NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); |
| 2402 | return -ENOENT; |
| 2403 | } |
| 2404 | |
| 2405 | /* Find head of filter chain. */ |
| 2406 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2407 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2408 | if (err) |
| 2409 | return err; |
| 2410 | |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2411 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2412 | NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); |
| 2413 | err = -EINVAL; |
| 2414 | goto errout; |
| 2415 | } |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2416 | /* Take the rtnl mutex if the block is shared (no qdisc found), the qdisc |
 | 2417 | * does not support unlocked execution, the classifier type is not |
 | 2418 | * specified, or the classifier does not support unlocked execution. |
 | 2419 | */ |
| 2420 | if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
Cong Wang | 6f96c3c | 2019-10-07 13:26:28 -0700 | [diff] [blame] | 2421 | !tcf_proto_is_unlocked(name)) { |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2422 | rtnl_held = true; |
| 2423 | rtnl_lock(); |
| 2424 | } |
| 2425 | |
| 2426 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2427 | if (err) |
| 2428 | goto errout; |
| 2429 | |
| 2430 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2431 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2432 | if (IS_ERR(block)) { |
| 2433 | err = PTR_ERR(block); |
| 2434 | goto errout; |
| 2435 | } |
| 2436 | |
| 2437 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2438 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2439 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
| 2440 | err = -EINVAL; |
| 2441 | goto errout; |
| 2442 | } |
| 2443 | chain = tcf_chain_get(block, chain_index, false); |
| 2444 | if (!chain) { |
| 2445 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
| 2446 | err = -EINVAL; |
| 2447 | goto errout; |
| 2448 | } |
| 2449 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2450 | mutex_lock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2451 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2452 | prio, false); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2453 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2454 | if (!tp || IS_ERR(tp)) { |
| 2455 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Vlad Buslov | 0e39903 | 2018-06-04 18:32:23 +0300 | [diff] [blame] | 2456 | err = tp ? PTR_ERR(tp) : -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2457 | goto errout; |
| 2458 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2459 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2460 | err = -EINVAL; |
| 2461 | goto errout; |
| 2462 | } |
| 2463 | |
| 2464 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2465 | |
| 2466 | if (!fh) { |
| 2467 | NL_SET_ERR_MSG(extack, "Specified filter handle not found"); |
| 2468 | err = -ENOENT; |
| 2469 | } else { |
| 2470 | err = tfilter_notify(net, skb, n, tp, block, q, parent, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2471 | fh, RTM_NEWTFILTER, true, rtnl_held); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2472 | if (err < 0) |
| 2473 | NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); |
| 2474 | } |
| 2475 | |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2476 | tfilter_put(tp, fh); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2477 | errout: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2478 | if (chain) { |
| 2479 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2480 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2481 | tcf_chain_put(chain); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2482 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2483 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2484 | |
| 2485 | if (rtnl_held) |
| 2486 | rtnl_unlock(); |
| 2487 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2488 | return err; |
| 2489 | } |
| 2490 | |
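| | /* Dump support for "tc filter show ...": tcf_dump_args carries the |
| | * netlink callback state into the per-classifier walk() iteration |
| | * below. |
| | */ |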
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2491 | struct tcf_dump_args { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | struct tcf_walker w; |
| 2493 | struct sk_buff *skb; |
| 2494 | struct netlink_callback *cb; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2495 | struct tcf_block *block; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2496 | struct Qdisc *q; |
| 2497 | u32 parent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2498 | }; |
| 2499 | |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2500 | static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2501 | { |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2502 | struct tcf_dump_args *a = (void *)arg; |
WANG Cong | 832d1d5 | 2014-01-09 16:14:01 -0800 | [diff] [blame] | 2503 | struct net *net = sock_net(a->skb->sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2504 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2505 | return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2506 | n, NETLINK_CB(a->cb->skb).portid, |
Jamal Hadi Salim | 5a7a555 | 2016-09-18 08:45:33 -0400 | [diff] [blame] | 2507 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2508 | RTM_NEWTFILTER, true); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2509 | } |
| 2510 | |
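| | /* Dump all filters on a chain. cb->args[] carries the resume state |
| | * across dump calls: args[0] is the flattened tp index, args[1] the |
| | * position within the current tp, args[2] a classifier walk cookie. |
| | */ |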
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2511 | static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, |
| 2512 | struct sk_buff *skb, struct netlink_callback *cb, |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2513 | long index_start, long *p_index) |
| 2514 | { |
| 2515 | struct net *net = sock_net(skb->sk); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2516 | struct tcf_block *block = chain->block; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2517 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2518 | struct tcf_proto *tp, *tp_prev; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2519 | struct tcf_dump_args arg; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2520 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2521 | for (tp = __tcf_get_next_proto(chain, NULL); |
| 2522 | tp; |
| 2523 | tp_prev = tp, |
| 2524 | tp = __tcf_get_next_proto(chain, tp), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2525 | tcf_proto_put(tp_prev, true, NULL), |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2526 | (*p_index)++) { |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2527 | if (*p_index < index_start) |
| 2528 | continue; |
| 2529 | if (TC_H_MAJ(tcm->tcm_info) && |
| 2530 | TC_H_MAJ(tcm->tcm_info) != tp->prio) |
| 2531 | continue; |
| 2532 | if (TC_H_MIN(tcm->tcm_info) && |
| 2533 | TC_H_MIN(tcm->tcm_info) != tp->protocol) |
| 2534 | continue; |
| 2535 | if (*p_index > index_start) |
| 2536 | memset(&cb->args[1], 0, |
| 2537 | sizeof(cb->args) - sizeof(cb->args[0])); |
| 2538 | if (cb->args[1] == 0) { |
YueHaibing | 5318918 | 2018-07-17 20:58:14 +0800 | [diff] [blame] | 2539 | if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2540 | NETLINK_CB(cb->skb).portid, |
| 2541 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2542 | RTM_NEWTFILTER, true) <= 0) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2543 | goto errout; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2544 | cb->args[1] = 1; |
| 2545 | } |
| 2546 | if (!tp->ops->walk) |
| 2547 | continue; |
| 2548 | arg.w.fn = tcf_node_dump; |
| 2549 | arg.skb = skb; |
| 2550 | arg.cb = cb; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2551 | arg.block = block; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2552 | arg.q = q; |
| 2553 | arg.parent = parent; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2554 | arg.w.stop = 0; |
| 2555 | arg.w.skip = cb->args[1] - 1; |
| 2556 | arg.w.count = 0; |
Vlad Buslov | 01683a1 | 2018-07-09 13:29:11 +0300 | [diff] [blame] | 2557 | arg.w.cookie = cb->args[2]; |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2558 | tp->ops->walk(tp, &arg.w, true); |
Vlad Buslov | 01683a1 | 2018-07-09 13:29:11 +0300 | [diff] [blame] | 2559 | cb->args[2] = arg.w.cookie; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2560 | cb->args[1] = arg.w.count + 1; |
| 2561 | if (arg.w.stop) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2562 | goto errout; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2563 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2564 | return true; |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2565 | |
| 2566 | errout: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2567 | tcf_proto_put(tp, true, NULL); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2568 | return false; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2569 | } |
| 2570 | |
Eric Dumazet | bd27a87 | 2009-11-05 20:57:26 -0800 | [diff] [blame] | 2571 | /* called with RTNL */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2572 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
| 2573 | { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2574 | struct tcf_chain *chain, *chain_prev; |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 2575 | struct net *net = sock_net(skb->sk); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2576 | struct nlattr *tca[TCA_MAX + 1]; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2577 | struct Qdisc *q = NULL; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 2578 | struct tcf_block *block; |
David S. Miller | 942b816 | 2012-06-26 21:48:50 -0700 | [diff] [blame] | 2579 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2580 | long index_start; |
| 2581 | long index; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2582 | u32 parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2583 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2584 | |
Hong zhi guo | 573ce26 | 2013-03-27 06:47:04 +0000 | [diff] [blame] | 2585 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2586 | return skb->len; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2587 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2588 | err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, |
| 2589 | NULL, cb->extack); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2590 | if (err) |
| 2591 | return err; |
| 2592 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2593 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2594 | block = tcf_block_refcnt_get(net, tcm->tcm_block_index); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2595 | if (!block) |
WANG Cong | 143976c | 2017-08-24 16:51:29 -0700 | [diff] [blame] | 2596 | goto out; |
Jiri Pirko | d680b35 | 2018-01-18 16:14:49 +0100 | [diff] [blame] | 2597 | /* If we work with a block index, q is NULL and the parent value |
 | 2598 | * will never be used in the following code. The check |
 | 2599 | * in tcf_fill_node prevents it. However, the compiler does not |
 | 2600 | * see that far, so set parent to zero to silence the warning |
 | 2601 | * about parent being uninitialized. |
| 2602 | */ |
| 2603 | parent = 0; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2604 | } else { |
| 2605 | const struct Qdisc_class_ops *cops; |
| 2606 | struct net_device *dev; |
| 2607 | unsigned long cl = 0; |
| 2608 | |
| 2609 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
| 2610 | if (!dev) |
| 2611 | return skb->len; |
| 2612 | |
| 2613 | parent = tcm->tcm_parent; |
| 2614 | if (!parent) { |
| 2615 | q = dev->qdisc; |
| 2616 | parent = q->handle; |
| 2617 | } else { |
| 2618 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
| 2619 | } |
| 2620 | if (!q) |
| 2621 | goto out; |
| 2622 | cops = q->ops->cl_ops; |
| 2623 | if (!cops) |
| 2624 | goto out; |
| 2625 | if (!cops->tcf_block) |
| 2626 | goto out; |
| 2627 | if (TC_H_MIN(tcm->tcm_parent)) { |
| 2628 | cl = cops->find(q, tcm->tcm_parent); |
| 2629 | if (cl == 0) |
| 2630 | goto out; |
| 2631 | } |
| 2632 | block = cops->tcf_block(q, cl, NULL); |
| 2633 | if (!block) |
| 2634 | goto out; |
| 2635 | if (tcf_block_shared(block)) |
| 2636 | q = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2637 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2638 | |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2639 | index_start = cb->args[0]; |
| 2640 | index = 0; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2641 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2642 | for (chain = __tcf_get_next_chain(block, NULL); |
| 2643 | chain; |
| 2644 | chain_prev = chain, |
| 2645 | chain = __tcf_get_next_chain(block, chain), |
| 2646 | tcf_chain_put(chain_prev)) { |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2647 | if (tca[TCA_CHAIN] && |
| 2648 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) |
| 2649 | continue; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2650 | if (!tcf_chain_dump(chain, q, parent, skb, cb, |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2651 | index_start, &index)) { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2652 | tcf_chain_put(chain); |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2653 | err = -EMSGSIZE; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2654 | break; |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2655 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2656 | } |
| 2657 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2658 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2659 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2660 | cb->args[0] = index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2661 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2662 | out: |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2663 | /* If we made no progress, the error (EMSGSIZE) is real */ |
| 2664 | if (skb->len == 0 && err) |
| 2665 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2666 | return skb->len; |
| 2667 | } |
| 2668 | |
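| | /* Fill a netlink message describing a chain: its qdisc or block |
| | * location, the chain index, and the template kind and parameters |
| | * when a template has been set on the chain. |
| | */ |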
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2669 | static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, |
| 2670 | void *tmplt_priv, u32 chain_index, |
| 2671 | struct net *net, struct sk_buff *skb, |
| 2672 | struct tcf_block *block, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2673 | u32 portid, u32 seq, u16 flags, int event) |
| 2674 | { |
| 2675 | unsigned char *b = skb_tail_pointer(skb); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2676 | const struct tcf_proto_ops *ops; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2677 | struct nlmsghdr *nlh; |
| 2678 | struct tcmsg *tcm; |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2679 | void *priv; |
| 2680 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2681 | ops = tmplt_ops; |
| 2682 | priv = tmplt_priv; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2683 | |
| 2684 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); |
| 2685 | if (!nlh) |
| 2686 | goto out_nlmsg_trim; |
| 2687 | tcm = nlmsg_data(nlh); |
| 2688 | tcm->tcm_family = AF_UNSPEC; |
| 2689 | tcm->tcm__pad1 = 0; |
| 2690 | tcm->tcm__pad2 = 0; |
| 2691 | tcm->tcm_handle = 0; |
| 2692 | if (block->q) { |
| 2693 | tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; |
| 2694 | tcm->tcm_parent = block->q->handle; |
| 2695 | } else { |
| 2696 | tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; |
| 2697 | tcm->tcm_block_index = block->index; |
| 2698 | } |
| 2699 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2700 | if (nla_put_u32(skb, TCA_CHAIN, chain_index)) |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2701 | goto nla_put_failure; |
| 2702 | |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2703 | if (ops) { |
| 2704 | if (nla_put_string(skb, TCA_KIND, ops->kind)) |
| 2705 | goto nla_put_failure; |
| 2706 | if (ops->tmplt_dump(skb, net, priv) < 0) |
| 2707 | goto nla_put_failure; |
| 2708 | } |
| 2709 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2710 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
| 2711 | return skb->len; |
| 2712 | |
| 2713 | out_nlmsg_trim: |
| 2714 | nla_put_failure: |
| 2715 | nlmsg_trim(skb, b); |
| 2716 | return -EMSGSIZE; |
| 2717 | } |
| 2718 | |
| 2719 | static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, |
| 2720 | u32 seq, u16 flags, int event, bool unicast) |
| 2721 | { |
| 2722 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 2723 | struct tcf_block *block = chain->block; |
| 2724 | struct net *net = block->net; |
| 2725 | struct sk_buff *skb; |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2726 | int err = 0; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2727 | |
| 2728 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 2729 | if (!skb) |
| 2730 | return -ENOBUFS; |
| 2731 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2732 | if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, |
| 2733 | chain->index, net, skb, block, portid, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2734 | seq, flags, event) <= 0) { |
| 2735 | kfree_skb(skb); |
| 2736 | return -EINVAL; |
| 2737 | } |
| 2738 | |
| 2739 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2740 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 2741 | else |
| 2742 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 2743 | flags & NLM_F_ECHO); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2744 | |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2745 | if (err > 0) |
| 2746 | err = 0; |
| 2747 | return err; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2748 | } |
| 2749 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2750 | static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, |
| 2751 | void *tmplt_priv, u32 chain_index, |
| 2752 | struct tcf_block *block, struct sk_buff *oskb, |
| 2753 | u32 seq, u16 flags, bool unicast) |
| 2754 | { |
| 2755 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 2756 | struct net *net = block->net; |
| 2757 | struct sk_buff *skb; |
| 2758 | |
| 2759 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 2760 | if (!skb) |
| 2761 | return -ENOBUFS; |
| 2762 | |
| 2763 | if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, |
| 2764 | block, portid, seq, flags, RTM_DELCHAIN) <= 0) { |
| 2765 | kfree_skb(skb); |
| 2766 | return -EINVAL; |
| 2767 | } |
| 2768 | |
| 2769 | if (unicast) |
| 2770 | return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 2771 | |
| 2772 | return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); |
| 2773 | } |
| 2774 | |
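| | /* Bind a filter template to a chain. A template pins the chain to a |
| | * single classifier kind and is meant to let drivers prepare for the |
| | * kind of matches to expect. Illustrative iproute2 syntax (device and |
| | * address are made-up examples): |
| | * |
| | *   tc chain add dev eth0 chain 1 protocol ip \ |
| | *           flower dst_mac 00:11:22:33:44:55 |
| | */ |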
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2775 | static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, |
| 2776 | struct nlattr **tca, |
| 2777 | struct netlink_ext_ack *extack) |
| 2778 | { |
| 2779 | const struct tcf_proto_ops *ops; |
Eric Dumazet | 2dd5616 | 2019-12-07 11:34:45 -0800 | [diff] [blame] | 2780 | char name[IFNAMSIZ]; |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2781 | void *tmplt_priv; |
| 2782 | |
 | 2783 | /* If kind is not set, the user did not specify a template. */ |
| 2784 | if (!tca[TCA_KIND]) |
| 2785 | return 0; |
| 2786 | |
Eric Dumazet | 2dd5616 | 2019-12-07 11:34:45 -0800 | [diff] [blame] | 2787 | if (tcf_proto_check_kind(tca[TCA_KIND], name)) { |
| 2788 | NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); |
| 2789 | return -EINVAL; |
| 2790 | } |
| 2791 | |
| 2792 | ops = tcf_proto_lookup_ops(name, true, extack); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2793 | if (IS_ERR(ops)) |
| 2794 | return PTR_ERR(ops); |
| 2795 | if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { |
| 2796 | NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); |
| 2797 | return -EOPNOTSUPP; |
| 2798 | } |
| 2799 | |
| 2800 | tmplt_priv = ops->tmplt_create(net, chain, tca, extack); |
| 2801 | if (IS_ERR(tmplt_priv)) { |
| 2802 | module_put(ops->owner); |
| 2803 | return PTR_ERR(tmplt_priv); |
| 2804 | } |
| 2805 | chain->tmplt_ops = ops; |
| 2806 | chain->tmplt_priv = tmplt_priv; |
| 2807 | return 0; |
| 2808 | } |
| 2809 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2810 | static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, |
| 2811 | void *tmplt_priv) |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2812 | { |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2813 | /* If template ops are not set, there is no work to do. */ |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2814 | if (!tmplt_ops) |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2815 | return; |
| 2816 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2817 | tmplt_ops->tmplt_destroy(tmplt_priv); |
| 2818 | module_put(tmplt_ops->owner); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2819 | } |
| 2820 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2821 | /* Add/delete/get a chain */ |
| 2822 | |
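| | /* Illustrative iproute2 syntax handled here (device name is a made-up |
| | * example): |
| | * |
| | *   tc chain add dev eth0 chain 100 |
| | *   tc chain del dev eth0 chain 100 |
| | *   tc chain get dev eth0 chain 100 |
| | */ |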
| 2823 | static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, |
| 2824 | struct netlink_ext_ack *extack) |
| 2825 | { |
| 2826 | struct net *net = sock_net(skb->sk); |
| 2827 | struct nlattr *tca[TCA_MAX + 1]; |
| 2828 | struct tcmsg *t; |
| 2829 | u32 parent; |
| 2830 | u32 chain_index; |
| 2831 | struct Qdisc *q = NULL; |
| 2832 | struct tcf_chain *chain = NULL; |
| 2833 | struct tcf_block *block; |
| 2834 | unsigned long cl; |
| 2835 | int err; |
| 2836 | |
| 2837 | if (n->nlmsg_type != RTM_GETCHAIN && |
| 2838 | !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
| 2839 | return -EPERM; |
| 2840 | |
| 2841 | replay: |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2842 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2843 | rtm_tca_policy, extack); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2844 | if (err < 0) |
| 2845 | return err; |
| 2846 | |
| 2847 | t = nlmsg_data(n); |
| 2848 | parent = t->tcm_parent; |
| 2849 | cl = 0; |
| 2850 | |
| 2851 | block = tcf_block_find(net, &q, &parent, &cl, |
| 2852 | t->tcm_ifindex, t->tcm_block_index, extack); |
| 2853 | if (IS_ERR(block)) |
| 2854 | return PTR_ERR(block); |
| 2855 | |
| 2856 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2857 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2858 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2859 | err = -EINVAL; |
| 2860 | goto errout_block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2861 | } |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2862 | |
| 2863 | mutex_lock(&block->lock); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2864 | chain = tcf_chain_lookup(block, chain_index); |
| 2865 | if (n->nlmsg_type == RTM_NEWCHAIN) { |
| 2866 | if (chain) { |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2867 | if (tcf_chain_held_by_acts_only(chain)) { |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2868 | /* The chain exists only because there is |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2869 | * some action referencing it. |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2870 | */ |
| 2871 | tcf_chain_hold(chain); |
| 2872 | } else { |
| 2873 | NL_SET_ERR_MSG(extack, "Filter chain already exists"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2874 | err = -EEXIST; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2875 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2876 | } |
| 2877 | } else { |
| 2878 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
| 2879 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2880 | err = -ENOENT; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2881 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2882 | } |
| 2883 | chain = tcf_chain_create(block, chain_index); |
| 2884 | if (!chain) { |
| 2885 | NL_SET_ERR_MSG(extack, "Failed to create filter chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2886 | err = -ENOMEM; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2887 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2888 | } |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2889 | } |
| 2890 | } else { |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2891 | if (!chain || tcf_chain_held_by_acts_only(chain)) { |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2892 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2893 | err = -EINVAL; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2894 | goto errout_block_locked; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2895 | } |
| 2896 | tcf_chain_hold(chain); |
| 2897 | } |
| 2898 | |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2899 | if (n->nlmsg_type == RTM_NEWCHAIN) { |
 | 2900 | /* Modifying a chain requires holding the parent block lock. In case |
| 2901 | * the chain was successfully added, take a reference to the |
| 2902 | * chain. This ensures that an empty chain does not disappear at |
| 2903 | * the end of this function. |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2904 | */ |
| 2905 | tcf_chain_hold(chain); |
| 2906 | chain->explicitly_created = true; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2907 | } |
| 2908 | mutex_unlock(&block->lock); |
| 2909 | |
| 2910 | switch (n->nlmsg_type) { |
| 2911 | case RTM_NEWCHAIN: |
| 2912 | err = tc_chain_tmplt_add(chain, net, tca, extack); |
| 2913 | if (err) { |
| 2914 | tcf_chain_put_explicitly_created(chain); |
| 2915 | goto errout; |
| 2916 | } |
| 2917 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2918 | tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, |
| 2919 | RTM_NEWCHAIN, false); |
| 2920 | break; |
| 2921 | case RTM_DELCHAIN: |
Cong Wang | f5b9bac | 2018-09-11 14:22:23 -0700 | [diff] [blame] | 2922 | tfilter_notify_chain(net, skb, block, q, parent, n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2923 | chain, RTM_DELTFILTER, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2924 | /* Flush the chain first as the user requested chain removal. */ |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2925 | tcf_chain_flush(chain, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2926 | /* In case the chain was successfully deleted, put a reference |
| 2927 | * to the chain previously taken during addition. |
| 2928 | */ |
| 2929 | tcf_chain_put_explicitly_created(chain); |
| 2930 | break; |
| 2931 | case RTM_GETCHAIN: |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2932 | err = tc_chain_notify(chain, skb, n->nlmsg_seq, |
| 2933 | n->nlmsg_seq, n->nlmsg_type, true); |
| 2934 | if (err < 0) |
| 2935 | NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); |
| 2936 | break; |
| 2937 | default: |
| 2938 | err = -EOPNOTSUPP; |
| 2939 | NL_SET_ERR_MSG(extack, "Unsupported message type"); |
| 2940 | goto errout; |
| 2941 | } |
| 2942 | |
| 2943 | errout: |
| 2944 | tcf_chain_put(chain); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2945 | errout_block: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2946 | tcf_block_release(q, block, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2947 | if (err == -EAGAIN) |
| 2948 | /* Replay the request. */ |
| 2949 | goto replay; |
| 2950 | return err; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2951 | |
| 2952 | errout_block_locked: |
| 2953 | mutex_unlock(&block->lock); |
| 2954 | goto errout_block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2955 | } |
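/* Illustrative usage (assumed iproute2 syntax; device name and chain index
 * are placeholders): the handler above backs the "tc chain" object.
 *
 *   tc chain add dev eth0 ingress chain 7    # RTM_NEWCHAIN + NLM_F_CREATE
 *   tc chain show dev eth0 ingress           # dumped via tc_dump_chain()
 *   tc chain del dev eth0 ingress chain 7    # RTM_DELCHAIN, flushes filters
 */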
| 2956 | |
| 2957 | /* called with RTNL */ |
| 2958 | static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) |
| 2959 | { |
| 2960 | struct net *net = sock_net(skb->sk); |
| 2961 | struct nlattr *tca[TCA_MAX + 1]; |
| 2962 | struct Qdisc *q = NULL; |
| 2963 | struct tcf_block *block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2964 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2965 | struct tcf_chain *chain; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2966 | long index_start; |
| 2967 | long index; |
| 2968 | u32 parent; |
| 2969 | int err; |
| 2970 | |
| 2971 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
| 2972 | return skb->len; |
| 2973 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2974 | err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, |
| 2975 | rtm_tca_policy, cb->extack); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2976 | if (err) |
| 2977 | return err; |
| 2978 | |
| 2979 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2980 | block = tcf_block_refcnt_get(net, tcm->tcm_block_index); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2981 | if (!block) |
| 2982 | goto out; |
| 2983 | /* If we work with a block index, q is NULL and the parent value
| 2984 | * is never used in the following code. The check
| 2985 | * in tcf_fill_node prevents that. However, the compiler does not
| 2986 | * see that far, so set parent to zero to silence the warning
| 2987 | * about parent being uninitialized.
| 2988 | */
| 2989 | parent = 0; |
| 2990 | } else { |
| 2991 | const struct Qdisc_class_ops *cops; |
| 2992 | struct net_device *dev; |
| 2993 | unsigned long cl = 0; |
| 2994 | |
| 2995 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
| 2996 | if (!dev) |
| 2997 | return skb->len; |
| 2998 | |
| 2999 | parent = tcm->tcm_parent; |
| 3000 | if (!parent) { |
| 3001 | q = dev->qdisc; |
| 3002 | parent = q->handle; |
| 3003 | } else { |
| 3004 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
| 3005 | } |
| 3006 | if (!q) |
| 3007 | goto out; |
| 3008 | cops = q->ops->cl_ops; |
| 3009 | if (!cops) |
| 3010 | goto out; |
| 3011 | if (!cops->tcf_block) |
| 3012 | goto out; |
| 3013 | if (TC_H_MIN(tcm->tcm_parent)) { |
| 3014 | cl = cops->find(q, tcm->tcm_parent); |
| 3015 | if (cl == 0) |
| 3016 | goto out; |
| 3017 | } |
| 3018 | block = cops->tcf_block(q, cl, NULL); |
| 3019 | if (!block) |
| 3020 | goto out; |
| 3021 | if (tcf_block_shared(block)) |
| 3022 | q = NULL; |
| 3023 | } |
| 3024 | |
| 3025 | index_start = cb->args[0]; |
| 3026 | index = 0; |
| 3027 | |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3028 | mutex_lock(&block->lock); |
| 3029 | list_for_each_entry(chain, &block->chain_list, list) { |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3030 | if ((tca[TCA_CHAIN] && |
| 3031 | nla_get_u32(tca[TCA_CHAIN]) != chain->index)) |
| 3032 | continue; |
| 3033 | if (index < index_start) { |
| 3034 | index++; |
| 3035 | continue; |
| 3036 | } |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3037 | if (tcf_chain_held_by_acts_only(chain)) |
| 3038 | continue; |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 3039 | err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, |
| 3040 | chain->index, net, skb, block, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3041 | NETLINK_CB(cb->skb).portid, |
| 3042 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 3043 | RTM_NEWCHAIN); |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3044 | if (err <= 0) |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3045 | break; |
| 3046 | index++; |
| 3047 | } |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 3048 | mutex_unlock(&block->lock); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3049 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 3050 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 3051 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3052 | cb->args[0] = index; |
| 3053 | |
| 3054 | out: |
| 3055 | /* If we made no progress, the error (EMSGSIZE) is real */
| 3056 | if (skb->len == 0 && err) |
| 3057 | return err; |
| 3058 | return skb->len; |
| 3059 | } |
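/* Illustrative dump against a shared block (assumed setup, placeholder
 * names): addressing by block index takes the TCM_IFINDEX_MAGIC_BLOCK
 * branch above instead of the dev/parent lookup.
 *
 *   tc qdisc add dev eth0 ingress_block 22 ingress
 *   tc chain show block 22
 */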
| 3060 | |
WANG Cong | 18d0264 | 2014-09-25 10:26:37 -0700 | [diff] [blame] | 3061 | void tcf_exts_destroy(struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3062 | { |
| 3063 | #ifdef CONFIG_NET_CLS_ACT |
Eric Dumazet | 3d66b89 | 2019-09-18 12:57:04 -0700 | [diff] [blame] | 3064 | if (exts->actions) { |
| 3065 | tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); |
| 3066 | kfree(exts->actions); |
| 3067 | } |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3068 | exts->nr_actions = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3069 | #endif |
| 3070 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3071 | EXPORT_SYMBOL(tcf_exts_destroy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3072 | |
Benjamin LaHaise | c1b5273 | 2013-01-14 05:15:39 +0000 | [diff] [blame] | 3073 | int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 3074 | struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 3075 | bool rtnl_held, struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3076 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3077 | #ifdef CONFIG_NET_CLS_ACT |
| 3078 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3079 | struct tc_action *act; |
Roman Mashak | d04e699 | 2018-03-08 16:59:17 -0500 | [diff] [blame] | 3080 | size_t attr_size = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3081 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3082 | if (exts->police && tb[exts->police]) { |
Jiri Pirko | 9fb9f25 | 2017-05-17 11:08:02 +0200 | [diff] [blame] | 3083 | act = tcf_action_init_1(net, tp, tb[exts->police], |
| 3084 | rate_tlv, "police", ovr, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 3085 | TCA_ACT_BIND, rtnl_held, |
| 3086 | extack); |
Patrick McHardy | ab27cfb | 2008-01-23 20:33:13 -0800 | [diff] [blame] | 3087 | if (IS_ERR(act)) |
| 3088 | return PTR_ERR(act); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3089 | |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3090 | act->type = exts->type = TCA_OLD_COMPAT; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3091 | exts->actions[0] = act; |
| 3092 | exts->nr_actions = 1; |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3093 | } else if (exts->action && tb[exts->action]) { |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3094 | int err; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3095 | |
Jiri Pirko | 9fb9f25 | 2017-05-17 11:08:02 +0200 | [diff] [blame] | 3096 | err = tcf_action_init(net, tp, tb[exts->action], |
| 3097 | rate_tlv, NULL, ovr, TCA_ACT_BIND, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 3098 | exts->actions, &attr_size, |
| 3099 | rtnl_held, extack); |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3100 | if (err < 0) |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3101 | return err; |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3102 | exts->nr_actions = err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3103 | } |
| 3104 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3105 | #else |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3106 | if ((exts->action && tb[exts->action]) || |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 3107 | (exts->police && tb[exts->police])) { |
| 3108 | NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3109 | return -EOPNOTSUPP; |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 3110 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3111 | #endif |
| 3112 | |
| 3113 | return 0; |
| 3114 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3115 | EXPORT_SYMBOL(tcf_exts_validate); |
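/* A minimal sketch of how a classifier's ->change() path is expected to use
 * tcf_exts_validate(); "my_cls_filter" and the helper below are hypothetical,
 * only the tcf_exts_validate() signature is taken from above.
 */
struct my_cls_filter {
	struct tcf_exts exts;	/* hypothetical per-filter action container */
};

static int my_cls_set_parms(struct net *net, struct tcf_proto *tp,
			    struct my_cls_filter *f, struct nlattr **tb,
			    struct nlattr *rate_tlv, bool ovr, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	/* Parse and bind TCA_*_ACT / TCA_*_POLICE attributes into f->exts. */
	return tcf_exts_validate(net, tp, tb, rate_tlv, &f->exts, ovr,
				 rtnl_held, extack);
}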
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3116 | |
Jiri Pirko | 9b0d444 | 2017-08-04 14:29:15 +0200 | [diff] [blame] | 3117 | void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3118 | { |
| 3119 | #ifdef CONFIG_NET_CLS_ACT |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3120 | struct tcf_exts old = *dst; |
| 3121 | |
Jiri Pirko | 9b0d444 | 2017-08-04 14:29:15 +0200 | [diff] [blame] | 3122 | *dst = *src; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3123 | tcf_exts_destroy(&old); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3124 | #endif |
| 3125 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3126 | EXPORT_SYMBOL(tcf_exts_change); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3127 | |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3128 | #ifdef CONFIG_NET_CLS_ACT |
| 3129 | static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) |
| 3130 | { |
| 3131 | if (exts->nr_actions == 0) |
| 3132 | return NULL; |
| 3133 | else |
| 3134 | return exts->actions[0]; |
| 3135 | } |
| 3136 | #endif |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3137 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3138 | int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3139 | { |
| 3140 | #ifdef CONFIG_NET_CLS_ACT |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 3141 | struct nlattr *nest; |
| 3142 | |
Jiri Pirko | 978dfd8 | 2017-08-04 14:29:03 +0200 | [diff] [blame] | 3143 | if (exts->action && tcf_exts_has_actions(exts)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3144 | /* |
| 3145 | * again for backward-compatible mode - we want
| 3146 | * to work with both old and new modes of entering
| 3147 | * tc data even if iproute2 is newer - jhs
| 3148 | */ |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3149 | if (exts->type != TCA_OLD_COMPAT) { |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 3150 | nest = nla_nest_start_noflag(skb, exts->action); |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3151 | if (nest == NULL) |
| 3152 | goto nla_put_failure; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 3153 | |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 3154 | if (tcf_action_dump(skb, exts->actions, 0, 0) < 0) |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 3155 | goto nla_put_failure; |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3156 | nla_nest_end(skb, nest); |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3157 | } else if (exts->police) { |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3158 | struct tc_action *act = tcf_exts_first_act(exts); |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 3159 | nest = nla_nest_start_noflag(skb, exts->police); |
Jamal Hadi Salim | 63acd68 | 2013-12-23 08:02:12 -0500 | [diff] [blame] | 3160 | if (nest == NULL || !act) |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3161 | goto nla_put_failure; |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3162 | if (tcf_action_dump_old(skb, act, 0, 0) < 0) |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 3163 | goto nla_put_failure; |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 3164 | nla_nest_end(skb, nest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3165 | } |
| 3166 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3167 | return 0; |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 3168 | |
| 3169 | nla_put_failure: |
| 3170 | nla_nest_cancel(skb, nest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3171 | return -1; |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 3172 | #else |
| 3173 | return 0; |
| 3174 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3175 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3176 | EXPORT_SYMBOL(tcf_exts_dump); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3177 | |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3178 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 3179 | int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3180 | { |
| 3181 | #ifdef CONFIG_NET_CLS_ACT |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3182 | struct tc_action *a = tcf_exts_first_act(exts); |
Ignacy Gawędzki | b057df2 | 2015-02-03 19:05:18 +0100 | [diff] [blame] | 3183 | if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3184 | return -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3185 | #endif |
| 3186 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3187 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3188 | EXPORT_SYMBOL(tcf_exts_dump_stats); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3189 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3190 | static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) |
| 3191 | { |
| 3192 | if (*flags & TCA_CLS_FLAGS_IN_HW) |
| 3193 | return; |
| 3194 | *flags |= TCA_CLS_FLAGS_IN_HW; |
| 3195 | atomic_inc(&block->offloadcnt); |
| 3196 | } |
| 3197 | |
| 3198 | static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) |
| 3199 | { |
| 3200 | if (!(*flags & TCA_CLS_FLAGS_IN_HW)) |
| 3201 | return; |
| 3202 | *flags &= ~TCA_CLS_FLAGS_IN_HW; |
| 3203 | atomic_dec(&block->offloadcnt); |
| 3204 | } |
| 3205 | |
| 3206 | static void tc_cls_offload_cnt_update(struct tcf_block *block, |
| 3207 | struct tcf_proto *tp, u32 *cnt, |
| 3208 | u32 *flags, u32 diff, bool add) |
| 3209 | { |
| 3210 | lockdep_assert_held(&block->cb_lock); |
| 3211 | |
| 3212 | spin_lock(&tp->lock); |
| 3213 | if (add) { |
| 3214 | if (!*cnt) |
| 3215 | tcf_block_offload_inc(block, flags); |
| 3216 | *cnt += diff; |
| 3217 | } else { |
| 3218 | *cnt -= diff; |
| 3219 | if (!*cnt) |
| 3220 | tcf_block_offload_dec(block, flags); |
| 3221 | } |
| 3222 | spin_unlock(&tp->lock); |
| 3223 | } |
| 3224 | |
| 3225 | static void |
| 3226 | tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, |
| 3227 | u32 *cnt, u32 *flags) |
| 3228 | { |
| 3229 | lockdep_assert_held(&block->cb_lock); |
| 3230 | |
| 3231 | spin_lock(&tp->lock); |
| 3232 | tcf_block_offload_dec(block, flags); |
| 3233 | *cnt = 0; |
| 3234 | spin_unlock(&tp->lock); |
| 3235 | } |
| 3236 | |
| 3237 | static int |
| 3238 | __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, |
| 3239 | void *type_data, bool err_stop) |
Jiri Pirko | 717503b | 2017-10-11 09:41:09 +0200 | [diff] [blame] | 3240 | { |
Pablo Neira Ayuso | 955bcb6 | 2019-07-09 22:55:46 +0200 | [diff] [blame] | 3241 | struct flow_block_cb *block_cb; |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3242 | int ok_count = 0; |
| 3243 | int err; |
| 3244 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3245 | list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { |
| 3246 | err = block_cb->cb(type, type_data, block_cb->cb_priv); |
| 3247 | if (err) { |
| 3248 | if (err_stop) |
| 3249 | return err; |
| 3250 | } else { |
| 3251 | ok_count++; |
| 3252 | } |
| 3253 | } |
| 3254 | return ok_count; |
| 3255 | } |
| 3256 | |
| 3257 | int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, |
| 3258 | void *type_data, bool err_stop, bool rtnl_held) |
| 3259 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3260 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3261 | int ok_count; |
| 3262 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3263 | retry: |
| 3264 | if (take_rtnl) |
| 3265 | rtnl_lock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3266 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3267 | /* We need to obtain the rtnl lock if the block is bound to devs that
| 3268 | * require it. In the block bind code, cb_lock is obtained while holding
| 3269 | * rtnl, so we must obtain the locks in the same order here.
| 3270 | */ |
| 3271 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3272 | up_read(&block->cb_lock); |
| 3273 | take_rtnl = true; |
| 3274 | goto retry; |
| 3275 | } |
| 3276 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3277 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3278 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3279 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3280 | if (take_rtnl) |
| 3281 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3282 | return ok_count; |
| 3283 | } |
| 3284 | EXPORT_SYMBOL(tc_setup_cb_call); |
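/* A sketch of the common stats-style caller (modeled on cls_flower; the
 * surrounding setup is assumed): err_stop is false so every driver bound to
 * the block gets to report, and the rtnl/cb_lock dance above is transparent
 * to the classifier.
 */
static void my_cls_update_hw_stats(struct tcf_block *block,
				   struct flow_cls_offload *cls_flower,
				   bool rtnl_held)
{
	cls_flower->command = FLOW_CLS_STATS;
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, cls_flower, false,
			 rtnl_held);
}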
| 3285 | |
| 3286 | /* Non-destructive filter add. If a filter that wasn't already in hardware is
| 3287 | * successfully offloaded, increment the block's offload counter. On failure,
| 3288 | * a previously offloaded filter is considered to be intact and the offload
| 3289 | * counter is not decremented.
| 3290 | */
| 3291 | |
| 3292 | int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, |
| 3293 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3294 | u32 *flags, unsigned int *in_hw_count, bool rtnl_held) |
| 3295 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3296 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3297 | int ok_count; |
| 3298 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3299 | retry: |
| 3300 | if (take_rtnl) |
| 3301 | rtnl_lock(); |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3302 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3303 | /* We need to obtain the rtnl lock if the block is bound to devs that
| 3304 | * require it. In the block bind code, cb_lock is obtained while holding
| 3305 | * rtnl, so we must obtain the locks in the same order here.
| 3306 | */ |
| 3307 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3308 | up_read(&block->cb_lock); |
| 3309 | take_rtnl = true; |
| 3310 | goto retry; |
| 3311 | } |
| 3312 | |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3313 | /* Make sure all netdevs sharing this block are offload-capable. */ |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3314 | if (block->nooffloaddevcnt && err_stop) { |
| 3315 | ok_count = -EOPNOTSUPP; |
| 3316 | goto err_unlock; |
| 3317 | } |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3318 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3319 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3320 | if (ok_count < 0) |
| 3321 | goto err_unlock; |
| 3322 | |
| 3323 | if (tp->ops->hw_add) |
| 3324 | tp->ops->hw_add(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3325 | if (ok_count > 0) |
| 3326 | tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, |
| 3327 | ok_count, true); |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3328 | err_unlock: |
| 3329 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3330 | if (take_rtnl) |
| 3331 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3332 | return ok_count < 0 ? ok_count : 0; |
Jiri Pirko | 717503b | 2017-10-11 09:41:09 +0200 | [diff] [blame] | 3333 | } |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3334 | EXPORT_SYMBOL(tc_setup_cb_add); |
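/* A sketch of the expected caller (modeled on cls_flower; names are
 * placeholders): err_stop is true only for skip_sw filters, and the helper
 * updates flags/in_hw_count under tp->lock on success.
 */
static int my_cls_hw_add(struct tcf_block *block, struct tcf_proto *tp,
			 struct flow_cls_offload *cls_flower, u32 *flags,
			 unsigned int *in_hw_count, bool rtnl_held)
{
	bool skip_sw = tc_skip_sw(*flags);
	int err;

	cls_flower->command = FLOW_CLS_REPLACE;
	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, cls_flower,
			      skip_sw, flags, in_hw_count, rtnl_held);
	if (err)
		return err;
	/* A skip_sw filter that no driver accepted cannot be installed. */
	if (skip_sw && !tc_in_hw(*flags))
		return -EINVAL;
	return 0;
}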
| 3335 | |
| 3336 | /* Destructive filter replace. If a filter that wasn't already in hardware is
| 3337 | * successfully offloaded, increment the block's offload counter. On failure,
| 3338 | * a previously offloaded filter is considered to be destroyed and the offload
| 3339 | * counter is decremented.
| 3340 | */
| 3341 | |
| 3342 | int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, |
| 3343 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3344 | u32 *old_flags, unsigned int *old_in_hw_count, |
| 3345 | u32 *new_flags, unsigned int *new_in_hw_count, |
| 3346 | bool rtnl_held) |
| 3347 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3348 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3349 | int ok_count; |
| 3350 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3351 | retry: |
| 3352 | if (take_rtnl) |
| 3353 | rtnl_lock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3354 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3355 | /* We need to obtain the rtnl lock if the block is bound to devs that
| 3356 | * require it. In the block bind code, cb_lock is obtained while holding
| 3357 | * rtnl, so we must obtain the locks in the same order here.
| 3358 | */ |
| 3359 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3360 | up_read(&block->cb_lock); |
| 3361 | take_rtnl = true; |
| 3362 | goto retry; |
| 3363 | } |
| 3364 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3365 | /* Make sure all netdevs sharing this block are offload-capable. */ |
| 3366 | if (block->nooffloaddevcnt && err_stop) { |
| 3367 | ok_count = -EOPNOTSUPP; |
| 3368 | goto err_unlock; |
| 3369 | } |
| 3370 | |
| 3371 | tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3372 | if (tp->ops->hw_del) |
| 3373 | tp->ops->hw_del(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3374 | |
| 3375 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3376 | if (ok_count < 0) |
| 3377 | goto err_unlock; |
| 3378 | |
| 3379 | if (tp->ops->hw_add) |
| 3380 | tp->ops->hw_add(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3381 | if (ok_count > 0) |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3382 | tc_cls_offload_cnt_update(block, tp, new_in_hw_count, |
| 3383 | new_flags, ok_count, true); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3384 | err_unlock: |
| 3385 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3386 | if (take_rtnl) |
| 3387 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3388 | return ok_count < 0 ? ok_count : 0; |
| 3389 | } |
| 3390 | EXPORT_SYMBOL(tc_setup_cb_replace); |
| 3391 | |
| 3392 | /* Destroy a filter and decrement the block's offload counter if the filter
| 3393 | * was previously offloaded.
| 3394 | */
| 3395 | |
| 3396 | int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, |
| 3397 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3398 | u32 *flags, unsigned int *in_hw_count, bool rtnl_held) |
| 3399 | { |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3400 | bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3401 | int ok_count; |
| 3402 | |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3403 | retry: |
| 3404 | if (take_rtnl) |
| 3405 | rtnl_lock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3406 | down_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3407 | /* We need to obtain the rtnl lock if the block is bound to devs that
| 3408 | * require it. In the block bind code, cb_lock is obtained while holding
| 3409 | * rtnl, so we must obtain the locks in the same order here.
| 3410 | */ |
| 3411 | if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { |
| 3412 | up_read(&block->cb_lock); |
| 3413 | take_rtnl = true; |
| 3414 | goto retry; |
| 3415 | } |
| 3416 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3417 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
| 3418 | |
| 3419 | tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3420 | if (tp->ops->hw_del) |
| 3421 | tp->ops->hw_del(tp, type_data); |
| 3422 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3423 | up_read(&block->cb_lock); |
Vlad Buslov | 11bd634 | 2019-08-26 16:45:02 +0300 | [diff] [blame] | 3424 | if (take_rtnl) |
| 3425 | rtnl_unlock(); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3426 | return ok_count < 0 ? ok_count : 0; |
| 3427 | } |
| 3428 | EXPORT_SYMBOL(tc_setup_cb_destroy); |
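/* The matching teardown sketch (assumed caller, modeled on cls_flower):
 * from the classifier's point of view destroy cannot fail; driver callback
 * errors only affect the accounting reset done above.
 */
static void my_cls_hw_destroy(struct tcf_block *block, struct tcf_proto *tp,
			      struct flow_cls_offload *cls_flower, u32 *flags,
			      unsigned int *in_hw_count, bool rtnl_held)
{
	cls_flower->command = FLOW_CLS_DESTROY;
	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, cls_flower, false,
			    flags, in_hw_count, rtnl_held);
}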
| 3429 | |
| 3430 | int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, |
| 3431 | bool add, flow_setup_cb_t *cb, |
| 3432 | enum tc_setup_type type, void *type_data, |
| 3433 | void *cb_priv, u32 *flags, unsigned int *in_hw_count) |
| 3434 | { |
| 3435 | int err = cb(type, type_data, cb_priv); |
| 3436 | |
| 3437 | if (err) { |
| 3438 | if (add && tc_skip_sw(*flags)) |
| 3439 | return err; |
| 3440 | } else { |
| 3441 | tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, |
| 3442 | add); |
| 3443 | } |
| 3444 | |
| 3445 | return 0; |
| 3446 | } |
| 3447 | EXPORT_SYMBOL(tc_setup_cb_reoffload); |
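/* A sketch of the ->reoffload() pattern this helper serves (hypothetical
 * filter list and struct; modeled on classifier reoffload implementations):
 * when a driver binds to or unbinds from an existing block, every filter is
 * replayed against that one callback.
 */
struct my_cls_filter_hw {	/* hypothetical per-filter offload state */
	struct list_head list;
	u32 flags;
	unsigned int in_hw_count;
};

static int my_cls_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			    struct list_head *filters, bool add,
			    flow_setup_cb_t *cb, void *cb_priv)
{
	struct my_cls_filter_hw *f;
	int err;

	list_for_each_entry(f, filters, list) {
		struct flow_cls_offload cls_flower = {};

		cls_flower.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		err = tc_setup_cb_reoffload(block, tp, add, cb,
					    TC_SETUP_CLSFLOWER, &cls_flower,
					    cb_priv, &f->flags,
					    &f->in_hw_count);
		if (err)
			return err;
	}
	return 0;
}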
Jiri Pirko | b3f55bd | 2017-10-11 09:41:08 +0200 | [diff] [blame] | 3448 | |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3449 | static int tcf_act_get_cookie(struct flow_action_entry *entry, |
| 3450 | const struct tc_action *act) |
| 3451 | { |
| 3452 | struct tc_cookie *cookie; |
| 3453 | int err = 0; |
| 3454 | |
| 3455 | rcu_read_lock(); |
| 3456 | cookie = rcu_dereference(act->act_cookie); |
| 3457 | if (cookie) { |
| 3458 | entry->cookie = flow_action_cookie_create(cookie->data, |
| 3459 | cookie->len, |
| 3460 | GFP_ATOMIC); |
| 3461 | if (!entry->cookie) |
| 3462 | err = -ENOMEM; |
| 3463 | } |
| 3464 | rcu_read_unlock(); |
| 3465 | return err; |
| 3466 | } |
| 3467 | |
| 3468 | static void tcf_act_put_cookie(struct flow_action_entry *entry) |
| 3469 | { |
| 3470 | flow_action_cookie_destroy(entry->cookie); |
| 3471 | } |
| 3472 | |
Vlad Buslov | 5a6ff4b | 2019-08-26 16:45:04 +0300 | [diff] [blame] | 3473 | void tc_cleanup_flow_action(struct flow_action *flow_action) |
| 3474 | { |
| 3475 | struct flow_action_entry *entry; |
| 3476 | int i; |
| 3477 | |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3478 | flow_action_for_each(i, entry, flow_action) { |
| 3479 | tcf_act_put_cookie(entry); |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3480 | if (entry->destructor) |
| 3481 | entry->destructor(entry->destructor_priv); |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3482 | } |
Vlad Buslov | 5a6ff4b | 2019-08-26 16:45:04 +0300 | [diff] [blame] | 3483 | } |
| 3484 | EXPORT_SYMBOL(tc_cleanup_flow_action); |
| 3485 | |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3486 | static void tcf_mirred_get_dev(struct flow_action_entry *entry, |
| 3487 | const struct tc_action *act) |
| 3488 | { |
Vlad Buslov | 470d506 | 2019-09-13 18:28:41 +0300 | [diff] [blame] | 3489 | #ifdef CONFIG_NET_CLS_ACT |
| 3490 | entry->dev = act->ops->get_dev(act, &entry->destructor); |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3491 | if (!entry->dev) |
| 3492 | return; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3493 | entry->destructor_priv = entry->dev; |
Vlad Buslov | 470d506 | 2019-09-13 18:28:41 +0300 | [diff] [blame] | 3494 | #endif |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3495 | } |
| 3496 | |
| 3497 | static void tcf_tunnel_encap_put_tunnel(void *priv) |
| 3498 | { |
| 3499 | struct ip_tunnel_info *tunnel = priv; |
| 3500 | |
| 3501 | kfree(tunnel); |
| 3502 | } |
| 3503 | |
| 3504 | static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, |
| 3505 | const struct tc_action *act) |
| 3506 | { |
| 3507 | entry->tunnel = tcf_tunnel_info_copy(act); |
| 3508 | if (!entry->tunnel) |
| 3509 | return -ENOMEM; |
| 3510 | entry->destructor = tcf_tunnel_encap_put_tunnel; |
| 3511 | entry->destructor_priv = entry->tunnel; |
| 3512 | return 0; |
| 3513 | } |
| 3514 | |
Vlad Buslov | 4a5da47 | 2019-09-13 18:28:40 +0300 | [diff] [blame] | 3515 | static void tcf_sample_get_group(struct flow_action_entry *entry, |
| 3516 | const struct tc_action *act) |
| 3517 | { |
| 3518 | #ifdef CONFIG_NET_CLS_ACT |
| 3519 | entry->sample.psample_group = |
| 3520 | act->ops->get_psample_group(act, &entry->destructor); |
| 3521 | entry->destructor_priv = entry->sample.psample_group; |
| 3522 | #endif |
| 3523 | } |
| 3524 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3525 | int tc_setup_flow_action(struct flow_action *flow_action, |
Vlad Buslov | b15e7a6 | 2020-02-17 12:12:12 +0200 | [diff] [blame] | 3526 | const struct tcf_exts *exts) |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3527 | { |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3528 | struct tc_action *act; |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3529 | int i, j, k, err = 0; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3530 | |
Jakub Kicinski | 0dfb2d8 | 2020-03-19 16:26:23 -0700 | [diff] [blame^] | 3531 | BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); |
| 3532 | BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); |
| 3533 | BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); |
Jiri Pirko | 44f8658 | 2020-03-07 12:40:20 +0100 | [diff] [blame] | 3534 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3535 | if (!exts) |
| 3536 | return 0; |
| 3537 | |
| 3538 | j = 0; |
| 3539 | tcf_exts_for_each_action(i, act, exts) { |
| 3540 | struct flow_action_entry *entry; |
| 3541 | |
| 3542 | entry = &flow_action->entries[j]; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3543 | spin_lock_bh(&act->tcfa_lock); |
Jiri Pirko | 2008495 | 2020-02-25 11:45:18 +0100 | [diff] [blame] | 3544 | err = tcf_act_get_cookie(entry, act); |
| 3545 | if (err) |
| 3546 | goto err_out_locked; |
Jiri Pirko | 44f8658 | 2020-03-07 12:40:20 +0100 | [diff] [blame] | 3547 | |
Jakub Kicinski | 0dfb2d8 | 2020-03-19 16:26:23 -0700 | [diff] [blame^] | 3548 | entry->hw_stats = act->hw_stats; |
Jiri Pirko | 44f8658 | 2020-03-07 12:40:20 +0100 | [diff] [blame] | 3549 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3550 | if (is_tcf_gact_ok(act)) { |
| 3551 | entry->id = FLOW_ACTION_ACCEPT; |
| 3552 | } else if (is_tcf_gact_shot(act)) { |
| 3553 | entry->id = FLOW_ACTION_DROP; |
| 3554 | } else if (is_tcf_gact_trap(act)) { |
| 3555 | entry->id = FLOW_ACTION_TRAP; |
| 3556 | } else if (is_tcf_gact_goto_chain(act)) { |
| 3557 | entry->id = FLOW_ACTION_GOTO; |
| 3558 | entry->chain_index = tcf_gact_goto_chain_index(act); |
| 3559 | } else if (is_tcf_mirred_egress_redirect(act)) { |
| 3560 | entry->id = FLOW_ACTION_REDIRECT; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3561 | tcf_mirred_get_dev(entry, act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3562 | } else if (is_tcf_mirred_egress_mirror(act)) { |
| 3563 | entry->id = FLOW_ACTION_MIRRED; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3564 | tcf_mirred_get_dev(entry, act); |
John Hurley | 48e584a | 2019-08-04 16:09:06 +0100 | [diff] [blame] | 3565 | } else if (is_tcf_mirred_ingress_redirect(act)) { |
| 3566 | entry->id = FLOW_ACTION_REDIRECT_INGRESS; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3567 | tcf_mirred_get_dev(entry, act); |
John Hurley | 48e584a | 2019-08-04 16:09:06 +0100 | [diff] [blame] | 3568 | } else if (is_tcf_mirred_ingress_mirror(act)) { |
| 3569 | entry->id = FLOW_ACTION_MIRRED_INGRESS; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3570 | tcf_mirred_get_dev(entry, act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3571 | } else if (is_tcf_vlan(act)) { |
| 3572 | switch (tcf_vlan_action(act)) { |
| 3573 | case TCA_VLAN_ACT_PUSH: |
| 3574 | entry->id = FLOW_ACTION_VLAN_PUSH; |
| 3575 | entry->vlan.vid = tcf_vlan_push_vid(act); |
| 3576 | entry->vlan.proto = tcf_vlan_push_proto(act); |
| 3577 | entry->vlan.prio = tcf_vlan_push_prio(act); |
| 3578 | break; |
| 3579 | case TCA_VLAN_ACT_POP: |
| 3580 | entry->id = FLOW_ACTION_VLAN_POP; |
| 3581 | break; |
| 3582 | case TCA_VLAN_ACT_MODIFY: |
| 3583 | entry->id = FLOW_ACTION_VLAN_MANGLE; |
| 3584 | entry->vlan.vid = tcf_vlan_push_vid(act); |
| 3585 | entry->vlan.proto = tcf_vlan_push_proto(act); |
| 3586 | entry->vlan.prio = tcf_vlan_push_prio(act); |
| 3587 | break; |
| 3588 | default: |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3589 | err = -EOPNOTSUPP; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3590 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3591 | } |
| 3592 | } else if (is_tcf_tunnel_set(act)) { |
| 3593 | entry->id = FLOW_ACTION_TUNNEL_ENCAP; |
Vlad Buslov | 1158958 | 2019-09-13 18:28:39 +0300 | [diff] [blame] | 3594 | err = tcf_tunnel_encap_get_tunnel(entry, act); |
| 3595 | if (err) |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3596 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3597 | } else if (is_tcf_tunnel_release(act)) { |
| 3598 | entry->id = FLOW_ACTION_TUNNEL_DECAP; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3599 | } else if (is_tcf_pedit(act)) { |
| 3600 | for (k = 0; k < tcf_pedit_nkeys(act); k++) { |
| 3601 | switch (tcf_pedit_cmd(act, k)) { |
| 3602 | case TCA_PEDIT_KEY_EX_CMD_SET: |
| 3603 | entry->id = FLOW_ACTION_MANGLE; |
| 3604 | break; |
| 3605 | case TCA_PEDIT_KEY_EX_CMD_ADD: |
| 3606 | entry->id = FLOW_ACTION_ADD; |
| 3607 | break; |
| 3608 | default: |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3609 | err = -EOPNOTSUPP; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3610 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3611 | } |
| 3612 | entry->mangle.htype = tcf_pedit_htype(act, k); |
| 3613 | entry->mangle.mask = tcf_pedit_mask(act, k); |
| 3614 | entry->mangle.val = tcf_pedit_val(act, k); |
| 3615 | entry->mangle.offset = tcf_pedit_offset(act, k); |
Jakub Kicinski | 0dfb2d8 | 2020-03-19 16:26:23 -0700 | [diff] [blame^] | 3616 | entry->hw_stats = act->hw_stats; |
Petr Machata | 2c4b58d | 2020-03-18 19:42:29 +0200 | [diff] [blame] | 3617 | entry = &flow_action->entries[++j]; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3618 | } |
| 3619 | } else if (is_tcf_csum(act)) { |
| 3620 | entry->id = FLOW_ACTION_CSUM; |
| 3621 | entry->csum_flags = tcf_csum_update_flags(act); |
| 3622 | } else if (is_tcf_skbedit_mark(act)) { |
| 3623 | entry->id = FLOW_ACTION_MARK; |
| 3624 | entry->mark = tcf_skbedit_mark(act); |
Pieter Jansen van Vuuren | a7a7be6 | 2019-05-04 04:46:16 -0700 | [diff] [blame] | 3625 | } else if (is_tcf_sample(act)) { |
| 3626 | entry->id = FLOW_ACTION_SAMPLE; |
Pieter Jansen van Vuuren | a7a7be6 | 2019-05-04 04:46:16 -0700 | [diff] [blame] | 3627 | entry->sample.trunc_size = tcf_sample_trunc_size(act); |
| 3628 | entry->sample.truncate = tcf_sample_truncate(act); |
| 3629 | entry->sample.rate = tcf_sample_rate(act); |
Vlad Buslov | 4a5da47 | 2019-09-13 18:28:40 +0300 | [diff] [blame] | 3630 | tcf_sample_get_group(entry, act); |
Pieter Jansen van Vuuren | 8c8cfc6 | 2019-05-04 04:46:22 -0700 | [diff] [blame] | 3631 | } else if (is_tcf_police(act)) { |
| 3632 | entry->id = FLOW_ACTION_POLICE; |
| 3633 | entry->police.burst = tcf_police_tcfp_burst(act); |
| 3634 | entry->police.rate_bytes_ps = |
| 3635 | tcf_police_rate_bytes_ps(act); |
Paul Blakey | b57dc7c | 2019-07-09 10:30:48 +0300 | [diff] [blame] | 3636 | } else if (is_tcf_ct(act)) { |
| 3637 | entry->id = FLOW_ACTION_CT; |
| 3638 | entry->ct.action = tcf_ct_action(act); |
| 3639 | entry->ct.zone = tcf_ct_zone(act); |
Paul Blakey | edd5861 | 2020-03-12 12:23:09 +0200 | [diff] [blame] | 3640 | entry->ct.flow_table = tcf_ct_ft(act); |
John Hurley | 6749d590 | 2019-07-23 15:33:59 +0100 | [diff] [blame] | 3641 | } else if (is_tcf_mpls(act)) { |
| 3642 | switch (tcf_mpls_action(act)) { |
| 3643 | case TCA_MPLS_ACT_PUSH: |
| 3644 | entry->id = FLOW_ACTION_MPLS_PUSH; |
| 3645 | entry->mpls_push.proto = tcf_mpls_proto(act); |
| 3646 | entry->mpls_push.label = tcf_mpls_label(act); |
| 3647 | entry->mpls_push.tc = tcf_mpls_tc(act); |
| 3648 | entry->mpls_push.bos = tcf_mpls_bos(act); |
| 3649 | entry->mpls_push.ttl = tcf_mpls_ttl(act); |
| 3650 | break; |
| 3651 | case TCA_MPLS_ACT_POP: |
| 3652 | entry->id = FLOW_ACTION_MPLS_POP; |
| 3653 | entry->mpls_pop.proto = tcf_mpls_proto(act); |
| 3654 | break; |
| 3655 | case TCA_MPLS_ACT_MODIFY: |
| 3656 | entry->id = FLOW_ACTION_MPLS_MANGLE; |
| 3657 | entry->mpls_mangle.label = tcf_mpls_label(act); |
| 3658 | entry->mpls_mangle.tc = tcf_mpls_tc(act); |
| 3659 | entry->mpls_mangle.bos = tcf_mpls_bos(act); |
| 3660 | entry->mpls_mangle.ttl = tcf_mpls_ttl(act); |
| 3661 | break; |
| 3662 | default:
|      | err = -EOPNOTSUPP;
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3663 | goto err_out_locked;
John Hurley | 6749d590 | 2019-07-23 15:33:59 +0100 | [diff] [blame] | 3664 | } |
John Hurley | fb1b775 | 2019-08-04 16:09:04 +0100 | [diff] [blame] | 3665 | } else if (is_tcf_skbedit_ptype(act)) { |
| 3666 | entry->id = FLOW_ACTION_PTYPE; |
| 3667 | entry->ptype = tcf_skbedit_ptype(act); |
Petr Machata | 2ce1241 | 2020-03-19 15:47:21 +0200 | [diff] [blame] | 3668 | } else if (is_tcf_skbedit_priority(act)) { |
| 3669 | entry->id = FLOW_ACTION_PRIORITY; |
| 3670 | entry->priority = tcf_skbedit_priority(act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3671 | } else { |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3672 | err = -EOPNOTSUPP; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3673 | goto err_out_locked; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3674 | } |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3675 | spin_unlock_bh(&act->tcfa_lock); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3676 | |
| 3677 | if (!is_tcf_pedit(act)) |
| 3678 | j++; |
| 3679 | } |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3680 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3681 | err_out: |
Vlad Buslov | 5a6ff4b | 2019-08-26 16:45:04 +0300 | [diff] [blame] | 3682 | if (err) |
| 3683 | tc_cleanup_flow_action(flow_action); |
| 3684 | |
Vlad Buslov | 9838b20 | 2019-08-26 16:45:03 +0300 | [diff] [blame] | 3685 | return err; |
Vlad Buslov | 7a47281 | 2020-02-17 12:12:09 +0200 | [diff] [blame] | 3686 | err_out_locked: |
| 3687 | spin_unlock_bh(&act->tcfa_lock); |
| 3688 | goto err_out; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3689 | } |
| 3690 | EXPORT_SYMBOL(tc_setup_flow_action); |
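/* Driver-side counterpart (a sketch under assumptions, not from this file):
 * once tc_setup_flow_action() has populated the array, an offloading driver
 * walks it with flow_action_for_each() and dispatches on entry->id.
 */
static int my_drv_parse_actions(const struct flow_action *flow_action)
{
	const struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		switch (entry->id) {
		case FLOW_ACTION_ACCEPT:
			break;
		case FLOW_ACTION_DROP:
			/* program a drop rule in hardware */
			break;
		case FLOW_ACTION_REDIRECT:
			/* entry->dev holds the target netdevice */
			break;
		default:
			return -EOPNOTSUPP;	/* unhandled action */
		}
	}
	return 0;
}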
| 3691 | |
Pablo Neira Ayuso | e3ab786 | 2019-02-02 12:50:45 +0100 | [diff] [blame] | 3692 | unsigned int tcf_exts_num_actions(struct tcf_exts *exts) |
| 3693 | { |
| 3694 | unsigned int num_acts = 0; |
| 3695 | struct tc_action *act; |
| 3696 | int i; |
| 3697 | |
| 3698 | tcf_exts_for_each_action(i, act, exts) { |
| 3699 | if (is_tcf_pedit(act)) |
| 3700 | num_acts += tcf_pedit_nkeys(act); |
| 3701 | else |
| 3702 | num_acts++; |
| 3703 | } |
| 3704 | return num_acts; |
| 3705 | } |
| 3706 | EXPORT_SYMBOL(tcf_exts_num_actions); |
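/* tcf_exts_num_actions() sizes the entries array that tc_setup_flow_action()
 * later fills; a typical pairing (sketch, assuming flow_rule_alloc() from
 * net/core/flow_offload.c and an error-pointer convention):
 */
static struct flow_rule *my_cls_build_rule(struct tcf_exts *exts)
{
	struct flow_rule *rule;
	int err;

	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
	if (!rule)
		return ERR_PTR(-ENOMEM);

	err = tc_setup_flow_action(&rule->action, exts);
	if (err) {
		kfree(rule);
		return ERR_PTR(err);
	}
	return rule;
}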
| 3707 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3708 | static __net_init int tcf_net_init(struct net *net) |
| 3709 | { |
| 3710 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 3711 | |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 3712 | spin_lock_init(&tn->idr_lock); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3713 | idr_init(&tn->idr); |
| 3714 | return 0; |
| 3715 | } |
| 3716 | |
| 3717 | static void __net_exit tcf_net_exit(struct net *net) |
| 3718 | { |
| 3719 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 3720 | |
| 3721 | idr_destroy(&tn->idr); |
| 3722 | } |
| 3723 | |
| 3724 | static struct pernet_operations tcf_net_ops = { |
| 3725 | .init = tcf_net_init, |
| 3726 | .exit = tcf_net_exit, |
| 3727 | .id = &tcf_net_id, |
| 3728 | .size = sizeof(struct tcf_net), |
| 3729 | }; |
| 3730 | |
John Hurley | 25a443f | 2019-12-05 17:03:35 +0000 | [diff] [blame] | 3731 | static struct flow_indr_block_entry block_entry = { |
| 3732 | .cb = tc_indr_block_get_and_cmd, |
| 3733 | .list = LIST_HEAD_INIT(block_entry.list), |
wenxu | 1150ab0 | 2019-08-07 09:13:53 +0800 | [diff] [blame] | 3734 | }; |
| 3735 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3736 | static int __init tc_filter_init(void) |
| 3737 | { |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3738 | int err; |
| 3739 | |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 3740 | tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); |
| 3741 | if (!tc_filter_wq) |
| 3742 | return -ENOMEM; |
| 3743 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3744 | err = register_pernet_subsys(&tcf_net_ops); |
| 3745 | if (err) |
| 3746 | goto err_register_pernet_subsys; |
| 3747 | |
John Hurley | 25a443f | 2019-12-05 17:03:35 +0000 | [diff] [blame] | 3748 | flow_indr_add_block_cb(&block_entry); |
wenxu | 1150ab0 | 2019-08-07 09:13:53 +0800 | [diff] [blame] | 3749 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 3750 | rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, |
| 3751 | RTNL_FLAG_DOIT_UNLOCKED); |
| 3752 | rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, |
| 3753 | RTNL_FLAG_DOIT_UNLOCKED); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 3754 | rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 3755 | tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3756 | rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); |
| 3757 | rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); |
| 3758 | rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, |
| 3759 | tc_dump_chain, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3760 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3761 | return 0; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3762 | |
| 3763 | err_register_pernet_subsys: |
| 3764 | destroy_workqueue(tc_filter_wq); |
| 3765 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3766 | } |
| 3767 | |
| 3768 | subsys_initcall(tc_filter_init); |