// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

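/* Like __tcf_proto_lookup_ops(), but if the type is not yet registered, try
 * to auto-load the "cls_<kind>" module and look it up again. RTNL is
 * temporarily dropped around the module request when the caller holds it.
 */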
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load, so even if we succeeded in loading
         * the module we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}

/* Register/unregister a new classifier type */

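/* A classifier module typically registers its ops from module_init(). This
 * is an illustrative, abridged sketch only; the "foo" names are hypothetical
 * and a real classifier fills in many more callbacks (change, walk, ...):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 */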
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops' destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

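/* Queue @func on the tc filter workqueue so that it runs only after an RCU
 * grace period has elapsed. Returns false if @rwork was already queued.
 */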
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}

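/* Returns true if the classifier type supports rtnl-unlocked execution
 * (TCF_PROTO_OPS_DOIT_UNLOCKED). Takes and drops a module reference for the
 * duration of the check.
 */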
static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error, return false so that the rtnl lock is taken. The proto
         * lookup/create functions will perform the lookup again and handle
         * errors properly.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

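/* A tcf_proto is reference counted: it is created with refcnt 1, additional
 * users take references with tcf_proto_get(), and the final tcf_proto_put()
 * destroys it via the ops->destroy() callback.
 */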
static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, extack);
}

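/* Walker callback used by tcf_proto_is_empty(): abort the walk (return -1)
 * as soon as the first filter handle is seen.
 */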
static int walker_check_empty(struct tcf_proto *tp, void *fh,
                              struct tcf_walker *arg)
{
        if (fh) {
                arg->nonempty = true;
                return -1;
        }
        return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
        struct tcf_walker walker = { .fn = walker_check_empty, };

        if (tp->ops->walk) {
                tp->ops->walk(tp, &walker, rtnl_held);
                return !walker.nonempty;
        }
        return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
        spin_lock(&tp->lock);
        if (tcf_proto_is_empty(tp, rtnl_held))
                tp->deleting = true;
        spin_unlock(&tp->lock);
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)      \
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

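/* Create a chain and link it into the block's chain list; the caller must
 * hold block->lock. Chain index 0 is additionally cached as
 * block->chain0.chain.
 */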
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if the block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* If all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send a notification only when we get the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it and the user ought
         * not to know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding block lock.
         * However, when the block is unlocked the chain can be changed
         * concurrently, so save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

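/* Detach every classifier from the chain under filter_chain_lock, notify
 * chain0 head-change watchers that the head is now NULL, and then drop the
 * reference each detached tp held.
 */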
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

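/* Issue a FLOW_BLOCK_BIND/UNBIND command to one indirectly registered block
 * callback and commit the resulting flow_block_offload callback list with
 * tcf_block_setup(); block->cb_lock is taken for writing around the call.
 */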
static void tc_indr_block_ing_cmd(struct net_device *dev,
                                  struct tcf_block *block,
                                  flow_indr_block_bind_cb_t *cb,
                                  void *cb_priv,
                                  enum flow_block_command command)
{
        struct flow_block_offload bo = {
                .command	= command,
                .binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
                .net		= dev_net(dev),
                .block_shared	= tcf_block_non_null_shared(block),
        };
        INIT_LIST_HEAD(&bo.cb_list);

        if (!block)
                return;

        bo.block = &block->flow_block;

        down_write(&block->cb_lock);
        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

        tcf_block_setup(block, &bo);
        up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
        const struct Qdisc_class_ops *cops;
        struct Qdisc *qdisc;

        if (!dev_ingress_queue(dev))
                return NULL;

        qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
        if (!qdisc)
                return NULL;

        cops = qdisc->ops->cl_ops;
        if (!cops)
                return NULL;

        if (!cops->tcf_block)
                return NULL;

        return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
                                          flow_indr_block_bind_cb_t *cb,
                                          void *cb_priv,
                                          enum flow_block_command command)
{
        struct tcf_block *block = tc_dev_ingress_block(dev);

        tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}

static void tc_indr_block_call(struct tcf_block *block,
                               struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               enum flow_block_command command,
                               struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {
                .command	= command,
                .binder_type	= ei->binder_type,
                .net		= dev_net(dev),
                .block		= &block->flow_block,
                .block_shared	= tcf_block_shared(block),
                .extack		= extack,
        };
        INIT_LIST_HEAD(&bo.cb_list);

        flow_indr_block_call(dev, &bo, command);
        tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return atomic_read(&block->offloadcnt);
}

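/* Build a flow_block_offload descriptor for @block and pass it to the
 * device's ndo_setup_tc(TC_SETUP_BLOCK) hook; on success the returned
 * callback list is committed via tcf_block_setup().
 */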
static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};
        int err;

        bo.net = dev_net(dev);
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = &block->flow_block;
        bo.block_shared = tcf_block_shared(block);
        bo.extack = extack;
        INIT_LIST_HEAD(&bo.cb_list);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
        if (err < 0)
                return err;

        return tcf_block_setup(block, &bo);
}

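/* Bind/unbind a block to/from hardware offload on the device behind @q.
 * Devices without ndo_setup_tc (or that return -EOPNOTSUPP) are tracked in
 * block->nooffloaddevcnt so purely software operation keeps working.
 */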
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_inc;

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, refuse the bind.
         */
        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                err = -EOPNOTSUPP;
                goto err_unlock;
        }

        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                goto err_unlock;

        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        up_write(&block->cb_lock);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block)) {
                err = -EOPNOTSUPP;
                goto err_unlock;
        }
        err = 0;
        block->nooffloaddevcnt++;
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
        up_write(&block->cb_lock);
        return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
        up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

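/* Blocks are kept in a per-netns IDR keyed by block->index. Insertion
 * preloads the IDR so the allocation itself can run under the spinlock
 * with GFP_NOWAIT.
 */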
| 799 | static int tcf_block_insert(struct tcf_block *block, struct net *net, |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 800 | struct netlink_ext_ack *extack) |
Jiri Pirko | a9b1944 | 2018-01-17 11:46:45 +0100 | [diff] [blame] | 801 | { |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 802 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 803 | int err; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 804 | |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 805 | idr_preload(GFP_KERNEL); |
| 806 | spin_lock(&tn->idr_lock); |
| 807 | err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, |
| 808 | GFP_NOWAIT); |
| 809 | spin_unlock(&tn->idr_lock); |
| 810 | idr_preload_end(); |
| 811 | |
| 812 | return err; |
Jiri Pirko | a9b1944 | 2018-01-17 11:46:45 +0100 | [diff] [blame] | 813 | } |
| 814 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 815 | static void tcf_block_remove(struct tcf_block *block, struct net *net) |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 816 | { |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 817 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 818 | |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 819 | spin_lock(&tn->idr_lock); |
Matthew Wilcox | 9c16094 | 2017-11-28 09:48:43 -0500 | [diff] [blame] | 820 | idr_remove(&tn->idr, block->index); |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 821 | spin_unlock(&tn->idr_lock); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 822 | } |
| 823 | |
| 824 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 825 | u32 block_index, |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 826 | struct netlink_ext_ack *extack) |
| 827 | { |
| 828 | struct tcf_block *block; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 829 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 830 | block = kzalloc(sizeof(*block), GFP_KERNEL); |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 831 | if (!block) { |
| 832 | NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 833 | return ERR_PTR(-ENOMEM); |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 834 | } |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 835 | mutex_init(&block->lock); |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 836 | init_rwsem(&block->cb_lock); |
Pablo Neira Ayuso | 14bfb13 | 2019-07-19 18:20:16 +0200 | [diff] [blame] | 837 | flow_block_init(&block->flow_block); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 838 | INIT_LIST_HEAD(&block->chain_list); |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 839 | INIT_LIST_HEAD(&block->owner_list); |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 840 | INIT_LIST_HEAD(&block->chain0.filter_chain_list); |
Jiri Pirko | acb6744 | 2017-10-19 15:50:31 +0200 | [diff] [blame] | 841 | |
Vlad Buslov | cfebd7e | 2018-09-24 19:22:54 +0300 | [diff] [blame] | 842 | refcount_set(&block->refcnt, 1); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 843 | block->net = net; |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 844 | block->index = block_index; |
| 845 | |
| 846 | /* Don't store q pointer for blocks which are shared */ |
| 847 | if (!tcf_block_shared(block)) |
| 848 | block->q = q; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 849 | return block; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 850 | } |
| 851 | |
| 852 | static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) |
| 853 | { |
| 854 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 855 | |
Matthew Wilcox | 322d884 | 2017-11-28 10:01:24 -0500 | [diff] [blame] | 856 | return idr_find(&tn->idr, block_index); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 857 | } |
| 858 | |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 859 | static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index) |
| 860 | { |
| 861 | struct tcf_block *block; |
| 862 | |
| 863 | rcu_read_lock(); |
| 864 | block = tcf_block_lookup(net, block_index); |
| 865 | if (block && !refcount_inc_not_zero(&block->refcnt)) |
| 866 | block = NULL; |
| 867 | rcu_read_unlock(); |
| 868 | |
| 869 | return block; |
| 870 | } |
| 871 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 872 | static struct tcf_chain * |
| 873 | __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) |
| 874 | { |
| 875 | mutex_lock(&block->lock); |
| 876 | if (chain) |
| 877 | chain = list_is_last(&chain->list, &block->chain_list) ? |
| 878 | NULL : list_next_entry(chain, list); |
| 879 | else |
| 880 | chain = list_first_entry_or_null(&block->chain_list, |
| 881 | struct tcf_chain, list); |
| 882 | |
| 883 | /* skip all action-only chains */ |
| 884 | while (chain && tcf_chain_held_by_acts_only(chain)) |
| 885 | chain = list_is_last(&chain->list, &block->chain_list) ? |
| 886 | NULL : list_next_entry(chain, list); |
| 887 | |
| 888 | if (chain) |
| 889 | tcf_chain_hold(chain); |
| 890 | mutex_unlock(&block->lock); |
| 891 | |
| 892 | return chain; |
| 893 | } |
| 894 | |
| 895 | /* Function to be used by all clients that want to iterate over all chains on |
| 896 | * block. It properly obtains block->lock and takes reference to chain before |
| 897 | * returning it. Users of this function must be tolerant to concurrent chain |
| 898 | * insertion/deletion or ensure that no concurrent chain modification is |
| 899 | * possible. Note that all netlink dump callbacks cannot guarantee to provide |
| 900 | * consistent dump because rtnl lock is released each time skb is filled with |
| 901 | * data and sent to user-space. |
| 902 | */ |
| 903 | |
| 904 | struct tcf_chain * |
| 905 | tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) |
| 906 | { |
| 907 | struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); |
| 908 | |
| 909 | if (chain) |
| 910 | tcf_chain_put(chain); |
| 911 | |
| 912 | return chain_next; |
| 913 | } |
| 914 | EXPORT_SYMBOL(tcf_get_next_chain); |
| 915 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 916 | static struct tcf_proto * |
| 917 | __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) |
| 918 | { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 919 | u32 prio = 0; |
| 920 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 921 | ASSERT_RTNL(); |
| 922 | mutex_lock(&chain->filter_chain_lock); |
| 923 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 924 | if (!tp) { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 925 | tp = tcf_chain_dereference(chain->filter_chain, chain); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 926 | } else if (tcf_proto_is_deleting(tp)) { |
| 927 | /* The 'deleting' flag is set and chain->filter_chain_lock was
| 928 | * released, which means the next pointer could be invalid. Restart
| 929 | * the search.
| 930 | */
| 931 | prio = tp->prio + 1; |
| 932 | tp = tcf_chain_dereference(chain->filter_chain, chain); |
| 933 | |
| 934 | for (; tp; tp = tcf_chain_dereference(tp->next, chain)) |
| 935 | if (!tp->deleting && tp->prio >= prio) |
| 936 | break; |
| 937 | } else { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 938 | tp = tcf_chain_dereference(tp->next, chain); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 939 | } |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 940 | |
| 941 | if (tp) |
| 942 | tcf_proto_get(tp); |
| 943 | |
| 944 | mutex_unlock(&chain->filter_chain_lock); |
| 945 | |
| 946 | return tp; |
| 947 | } |
| 948 | |
| 949 | /* Function to be used by all clients that want to iterate over all tp's on
| 950 | * a chain. Users of this function must be tolerant to concurrent tp
| 951 | * insertion/deletion or ensure that no concurrent chain modification is
| 952 | * possible. Note that netlink dump callbacks cannot guarantee a consistent
| 953 | * dump because the rtnl lock is released each time an skb is filled with
| 954 | * data and sent to user-space.
| 955 | */
| 956 | |
| 957 | struct tcf_proto * |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 958 | tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp, |
| 959 | bool rtnl_held) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 960 | { |
| 961 | struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp); |
| 962 | |
| 963 | if (tp) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 964 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 965 | |
| 966 | return tp_next; |
| 967 | } |
| 968 | EXPORT_SYMBOL(tcf_get_next_proto); |
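
/* A matching sketch for the proto iterator (hypothetical helper): visiting
 * every classifier on a chain with rtnl held. As above, handing the previous
 * tp back in releases its reference via tcf_proto_put() inside
 * tcf_get_next_proto(), so a full traversal leaks nothing.
 */
static unsigned int example_count_protos(struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	unsigned int n = 0;

	for (tp = tcf_get_next_proto(chain, NULL, true);
	     tp;
	     tp = tcf_get_next_proto(chain, tp, true))
		n++;

	return n;
}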
| 969 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 970 | static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 971 | { |
| 972 | struct tcf_chain *chain; |
| 973 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 974 | /* Last reference to block. At this point chains cannot be added or |
| 975 | * removed concurrently. |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 976 | */ |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 977 | for (chain = tcf_get_next_chain(block, NULL); |
| 978 | chain; |
| 979 | chain = tcf_get_next_chain(block, chain)) { |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 980 | tcf_chain_put_explicitly_created(chain); |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 981 | tcf_chain_flush(chain, rtnl_held); |
Vlad Buslov | f002343 | 2018-09-24 19:22:55 +0300 | [diff] [blame] | 982 | } |
| 983 | } |
| 984 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 985 | /* Look up the Qdisc and increment its reference counter.
| 986 | * Set parent, if necessary. |
| 987 | */ |
| 988 | |
| 989 | static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, |
| 990 | u32 *parent, int ifindex, bool rtnl_held, |
| 991 | struct netlink_ext_ack *extack) |
| 992 | { |
| 993 | const struct Qdisc_class_ops *cops; |
| 994 | struct net_device *dev; |
| 995 | int err = 0; |
| 996 | |
| 997 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
| 998 | return 0; |
| 999 | |
| 1000 | rcu_read_lock(); |
| 1001 | |
| 1002 | /* Find link */ |
| 1003 | dev = dev_get_by_index_rcu(net, ifindex); |
| 1004 | if (!dev) { |
| 1005 | rcu_read_unlock(); |
| 1006 | return -ENODEV; |
| 1007 | } |
| 1008 | |
| 1009 | /* Find qdisc */ |
| 1010 | if (!*parent) { |
| 1011 | *q = dev->qdisc; |
| 1012 | *parent = (*q)->handle; |
| 1013 | } else { |
| 1014 | *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); |
| 1015 | if (!*q) { |
| 1016 | NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
| 1017 | err = -EINVAL; |
| 1018 | goto errout_rcu; |
| 1019 | } |
| 1020 | } |
| 1021 | |
| 1022 | *q = qdisc_refcount_inc_nz(*q); |
| 1023 | if (!*q) { |
| 1024 | NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
| 1025 | err = -EINVAL; |
| 1026 | goto errout_rcu; |
| 1027 | } |
| 1028 | |
| 1029 | /* Is it classful? */ |
| 1030 | cops = (*q)->ops->cl_ops; |
| 1031 | if (!cops) { |
| 1032 | NL_SET_ERR_MSG(extack, "Qdisc not classful"); |
| 1033 | err = -EINVAL; |
| 1034 | goto errout_qdisc; |
| 1035 | } |
| 1036 | |
| 1037 | if (!cops->tcf_block) { |
| 1038 | NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); |
| 1039 | err = -EOPNOTSUPP; |
| 1040 | goto errout_qdisc; |
| 1041 | } |
| 1042 | |
| 1043 | errout_rcu: |
| 1044 | /* At this point we know that the qdisc is not noop_qdisc,
| 1045 | * which means that the qdisc holds a reference to the net_device
| 1046 | * and we hold a reference to the qdisc, so it is safe to release
| 1047 | * the rcu read lock.
| 1048 | */
| 1049 | rcu_read_unlock(); |
| 1050 | return err; |
| 1051 | |
| 1052 | errout_qdisc: |
| 1053 | rcu_read_unlock(); |
| 1054 | |
| 1055 | if (rtnl_held) |
| 1056 | qdisc_put(*q); |
| 1057 | else |
| 1058 | qdisc_put_unlocked(*q); |
| 1059 | *q = NULL; |
| 1060 | |
| 1061 | return err; |
| 1062 | } |
| 1063 | |
| 1064 | static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl, |
| 1065 | int ifindex, struct netlink_ext_ack *extack) |
| 1066 | { |
| 1067 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
| 1068 | return 0; |
| 1069 | |
| 1070 | /* Are we searching for a filter attached to a class? */
| 1071 | if (TC_H_MIN(parent)) { |
| 1072 | const struct Qdisc_class_ops *cops = q->ops->cl_ops; |
| 1073 | |
| 1074 | *cl = cops->find(q, parent); |
| 1075 | if (*cl == 0) { |
| 1076 | NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); |
| 1077 | return -ENOENT; |
| 1078 | } |
| 1079 | } |
| 1080 | |
| 1081 | return 0; |
| 1082 | } |
| 1083 | |
| 1084 | static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q, |
| 1085 | unsigned long cl, int ifindex, |
| 1086 | u32 block_index, |
| 1087 | struct netlink_ext_ack *extack) |
| 1088 | { |
| 1089 | struct tcf_block *block; |
| 1090 | |
| 1091 | if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
| 1092 | block = tcf_block_refcnt_get(net, block_index); |
| 1093 | if (!block) { |
| 1094 | NL_SET_ERR_MSG(extack, "Block of given index was not found"); |
| 1095 | return ERR_PTR(-EINVAL); |
| 1096 | } |
| 1097 | } else { |
| 1098 | const struct Qdisc_class_ops *cops = q->ops->cl_ops; |
| 1099 | |
| 1100 | block = cops->tcf_block(q, cl, extack); |
| 1101 | if (!block) |
| 1102 | return ERR_PTR(-EINVAL); |
| 1103 | |
| 1104 | if (tcf_block_shared(block)) { |
| 1105 | NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); |
| 1106 | return ERR_PTR(-EOPNOTSUPP); |
| 1107 | } |
| 1108 | |
| 1109 | /* Always take a reference to the block in order to support
| 1110 | * executing the cls API rules update path without the rtnl lock.
| 1111 | * The caller must release the block when finished using it. The
| 1112 | * 'if' branch of this conditional obtains its reference by calling
| 1113 | * tcf_block_refcnt_get().
| 1114 | */
| 1115 | refcount_inc(&block->refcnt); |
| 1116 | } |
| 1117 | |
| 1118 | return block; |
| 1119 | } |
| 1120 | |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1121 | static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1122 | struct tcf_block_ext_info *ei, bool rtnl_held) |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1123 | { |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1124 | if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1125 | /* Flushing/putting all chains will cause the block to be
| 1126 | * deallocated when the last chain is freed. However, if chain_list
| 1127 | * is empty, the block has to be deallocated manually. Once the
| 1128 | * block's reference counter has reached 0, it is no longer possible
| 1129 | * to increment it or to add new chains to the block.
| 1130 | */
| 1131 | bool free_block = list_empty(&block->chain_list); |
| 1132 | |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1133 | mutex_unlock(&block->lock); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1134 | if (tcf_block_shared(block)) |
| 1135 | tcf_block_remove(block, block->net); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1136 | |
| 1137 | if (q) |
| 1138 | tcf_block_offload_unbind(block, q, ei); |
| 1139 | |
| 1140 | if (free_block) |
Vlad Buslov | c266f64 | 2019-02-11 10:55:32 +0200 | [diff] [blame] | 1141 | tcf_block_destroy(block); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1142 | else |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1143 | tcf_block_flush_all_chains(block, rtnl_held); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1144 | } else if (q) { |
| 1145 | tcf_block_offload_unbind(block, q, ei); |
| 1146 | } |
| 1147 | } |
| 1148 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1149 | static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1150 | { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1151 | __tcf_block_put(block, NULL, NULL, rtnl_held); |
Vlad Buslov | 0607e43 | 2018-09-24 19:22:57 +0300 | [diff] [blame] | 1152 | } |
| 1153 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1154 | /* Find tcf block. |
| 1155 | * Set q, parent, cl when appropriate. |
| 1156 | */ |
| 1157 | |
| 1158 | static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, |
| 1159 | u32 *parent, unsigned long *cl, |
| 1160 | int ifindex, u32 block_index, |
| 1161 | struct netlink_ext_ack *extack) |
| 1162 | { |
| 1163 | struct tcf_block *block; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1164 | int err = 0; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1165 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1166 | ASSERT_RTNL(); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1167 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1168 | err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); |
| 1169 | if (err) |
| 1170 | goto errout; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1171 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1172 | err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); |
| 1173 | if (err) |
| 1174 | goto errout_qdisc; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1175 | |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1176 | block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); |
Dan Carpenter | af736bf | 2019-02-18 12:26:32 +0300 | [diff] [blame] | 1177 | if (IS_ERR(block)) { |
| 1178 | err = PTR_ERR(block); |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1179 | goto errout_qdisc; |
Dan Carpenter | af736bf | 2019-02-18 12:26:32 +0300 | [diff] [blame] | 1180 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1181 | |
| 1182 | return block; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1183 | |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1184 | errout_qdisc: |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1185 | if (*q) |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1186 | qdisc_put(*q); |
Vlad Buslov | 18d3eef | 2019-02-11 10:55:47 +0200 | [diff] [blame] | 1187 | errout: |
| 1188 | *q = NULL; |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1189 | return ERR_PTR(err); |
| 1190 | } |
| 1191 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1192 | static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, |
| 1193 | bool rtnl_held) |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 1194 | { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1195 | if (!IS_ERR_OR_NULL(block)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1196 | tcf_block_refcnt_put(block, rtnl_held); |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1197 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 1198 | if (q) { |
| 1199 | if (rtnl_held) |
| 1200 | qdisc_put(q); |
| 1201 | else |
| 1202 | qdisc_put_unlocked(q); |
| 1203 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1204 | } |
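
/* Hedged sketch of how the two helpers above pair up (the caller is
 * hypothetical): tcf_block_find() returns with references held on both the
 * block and, if one was resolved, the qdisc, and tcf_block_release() is the
 * single teardown path for both. Must run under rtnl, per the ASSERT_RTNL()
 * in tcf_block_find().
 */
static int example_use_block(struct net *net, int ifindex, u32 block_index,
			     struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	struct Qdisc *q = NULL;
	unsigned long cl = 0;
	u32 parent = 0;

	block = tcf_block_find(net, &q, &parent, &cl, ifindex, block_index,
			       extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	/* ... inspect or modify filters on the block here ... */

	tcf_block_release(q, block, true);
	return 0;
}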
| 1205 | |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1206 | struct tcf_block_owner_item { |
| 1207 | struct list_head list; |
| 1208 | struct Qdisc *q; |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1209 | enum flow_block_binder_type binder_type; |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1210 | }; |
| 1211 | |
| 1212 | static void |
| 1213 | tcf_block_owner_netif_keep_dst(struct tcf_block *block, |
| 1214 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1215 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1216 | { |
| 1217 | if (block->keep_dst && |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1218 | binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && |
| 1219 | binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1220 | netif_keep_dst(qdisc_dev(q)); |
| 1221 | } |
| 1222 | |
| 1223 | void tcf_block_netif_keep_dst(struct tcf_block *block) |
| 1224 | { |
| 1225 | struct tcf_block_owner_item *item; |
| 1226 | |
| 1227 | block->keep_dst = true; |
| 1228 | list_for_each_entry(item, &block->owner_list, list) |
| 1229 | tcf_block_owner_netif_keep_dst(block, item->q, |
| 1230 | item->binder_type); |
| 1231 | } |
| 1232 | EXPORT_SYMBOL(tcf_block_netif_keep_dst); |
| 1233 | |
| 1234 | static int tcf_block_owner_add(struct tcf_block *block, |
| 1235 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1236 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1237 | { |
| 1238 | struct tcf_block_owner_item *item; |
| 1239 | |
| 1240 | item = kmalloc(sizeof(*item), GFP_KERNEL); |
| 1241 | if (!item) |
| 1242 | return -ENOMEM; |
| 1243 | item->q = q; |
| 1244 | item->binder_type = binder_type; |
| 1245 | list_add(&item->list, &block->owner_list); |
| 1246 | return 0; |
| 1247 | } |
| 1248 | |
| 1249 | static void tcf_block_owner_del(struct tcf_block *block, |
| 1250 | struct Qdisc *q, |
Pablo Neira Ayuso | 32f8c40 | 2019-07-09 22:55:41 +0200 | [diff] [blame] | 1251 | enum flow_block_binder_type binder_type) |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1252 | { |
| 1253 | struct tcf_block_owner_item *item; |
| 1254 | |
| 1255 | list_for_each_entry(item, &block->owner_list, list) { |
| 1256 | if (item->q == q && item->binder_type == binder_type) { |
| 1257 | list_del(&item->list); |
| 1258 | kfree(item); |
| 1259 | return; |
| 1260 | } |
| 1261 | } |
| 1262 | WARN_ON(1); |
| 1263 | } |
| 1264 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1265 | int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, |
| 1266 | struct tcf_block_ext_info *ei, |
| 1267 | struct netlink_ext_ack *extack) |
| 1268 | { |
| 1269 | struct net *net = qdisc_net(q); |
| 1270 | struct tcf_block *block = NULL; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1271 | int err; |
| 1272 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1273 | if (ei->block_index) |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1274 | /* a non-zero block_index means a shared block is requested */
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 1275 | block = tcf_block_refcnt_get(net, ei->block_index); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1276 | |
| 1277 | if (!block) { |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 1278 | block = tcf_block_create(net, q, ei->block_index, extack); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1279 | if (IS_ERR(block)) |
| 1280 | return PTR_ERR(block); |
Jiri Pirko | bb047dd | 2018-02-13 12:00:16 +0100 | [diff] [blame] | 1281 | if (tcf_block_shared(block)) { |
| 1282 | err = tcf_block_insert(block, net, extack); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1283 | if (err) |
| 1284 | goto err_block_insert; |
| 1285 | } |
| 1286 | } |
| 1287 | |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1288 | err = tcf_block_owner_add(block, q, ei->binder_type); |
| 1289 | if (err) |
| 1290 | goto err_block_owner_add; |
| 1291 | |
| 1292 | tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); |
| 1293 | |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1294 | err = tcf_chain0_head_change_cb_add(block, ei, extack); |
Jiri Pirko | a9b1944 | 2018-01-17 11:46:45 +0100 | [diff] [blame] | 1295 | if (err) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1296 | goto err_chain0_head_change_cb_add; |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1297 | |
John Hurley | 60513bd | 2018-06-25 14:30:04 -0700 | [diff] [blame] | 1298 | err = tcf_block_offload_bind(block, q, ei, extack); |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1299 | if (err) |
| 1300 | goto err_block_offload_bind; |
| 1301 | |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1302 | *p_block = block; |
| 1303 | return 0; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1304 | |
Jiri Pirko | caa7260 | 2018-01-17 11:46:50 +0100 | [diff] [blame] | 1305 | err_block_offload_bind: |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1306 | tcf_chain0_head_change_cb_del(block, ei); |
| 1307 | err_chain0_head_change_cb_add: |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1308 | tcf_block_owner_del(block, q, ei->binder_type); |
| 1309 | err_block_owner_add: |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 1310 | err_block_insert: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1311 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1312 | return err; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1313 | } |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1314 | EXPORT_SYMBOL(tcf_block_get_ext); |
| 1315 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1316 | static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) |
| 1317 | { |
| 1318 | struct tcf_proto __rcu **p_filter_chain = priv; |
| 1319 | |
| 1320 | rcu_assign_pointer(*p_filter_chain, tp_head); |
| 1321 | } |
| 1322 | |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1323 | int tcf_block_get(struct tcf_block **p_block, |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 1324 | struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, |
| 1325 | struct netlink_ext_ack *extack) |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1326 | { |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1327 | struct tcf_block_ext_info ei = { |
| 1328 | .chain_head_change = tcf_chain_head_change_dflt, |
| 1329 | .chain_head_change_priv = p_filter_chain, |
| 1330 | }; |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1331 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1332 | WARN_ON(!p_filter_chain); |
Alexander Aring | 8d1a77f | 2017-12-20 12:35:19 -0500 | [diff] [blame] | 1333 | return tcf_block_get_ext(p_block, q, &ei, extack); |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1334 | } |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1335 | EXPORT_SYMBOL(tcf_block_get); |
| 1336 | |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1337 | /* XXX: Standalone actions are not allowed to jump to any chain, and bound |
Roman Kapl | a60b3f5 | 2017-11-24 12:27:58 +0100 | [diff] [blame] | 1338 | * actions should all be removed after flushing.
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1339 | */ |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1340 | void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, |
David S. Miller | e1ea2f9 | 2017-10-30 14:10:01 +0900 | [diff] [blame] | 1341 | struct tcf_block_ext_info *ei) |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 1342 | { |
David S. Miller | c30abd5 | 2017-12-16 22:11:55 -0500 | [diff] [blame] | 1343 | if (!block) |
| 1344 | return; |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1345 | tcf_chain0_head_change_cb_del(block, ei); |
Jiri Pirko | f36fe1c | 2018-01-17 11:46:48 +0100 | [diff] [blame] | 1346 | tcf_block_owner_del(block, q, ei->binder_type); |
Roman Kapl | a60b3f5 | 2017-11-24 12:27:58 +0100 | [diff] [blame] | 1347 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1348 | __tcf_block_put(block, q, ei, true); |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1349 | } |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1350 | EXPORT_SYMBOL(tcf_block_put_ext); |
| 1351 | |
| 1352 | void tcf_block_put(struct tcf_block *block) |
| 1353 | { |
| 1354 | struct tcf_block_ext_info ei = {0, }; |
| 1355 | |
Jiri Pirko | 4853f12 | 2017-12-21 13:13:59 +0100 | [diff] [blame] | 1356 | if (!block) |
| 1357 | return; |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1358 | tcf_block_put_ext(block, block->q, &ei); |
Jiri Pirko | 8c4083b | 2017-10-19 15:50:29 +0200 | [diff] [blame] | 1359 | } |
David S. Miller | e1ea2f9 | 2017-10-30 14:10:01 +0900 | [diff] [blame] | 1360 | |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1361 | EXPORT_SYMBOL(tcf_block_put); |
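
/* Hedged sketch of the canonical qdisc-side pairing (the struct and
 * function names are illustrative, not from this file): a classful qdisc
 * obtains its block in ->init() and releases it in ->destroy(), letting
 * tcf_block_get() wire chain0 head changes into its filter list pointer
 * via tcf_chain_head_change_dflt().
 */
struct example_sched_data {
	struct tcf_block *block;
	struct tcf_proto __rcu *filter_list;
};

static int example_qdisc_init(struct Qdisc *sch,
			      struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);

	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void example_qdisc_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}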
Jiri Pirko | cf1facd | 2017-02-09 14:38:56 +0100 | [diff] [blame] | 1362 | |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1363 | static int |
Pablo Neira Ayuso | a732331 | 2019-07-19 18:20:15 +0200 | [diff] [blame] | 1364 | tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1365 | void *cb_priv, bool add, bool offload_in_use, |
| 1366 | struct netlink_ext_ack *extack) |
| 1367 | { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1368 | struct tcf_chain *chain, *chain_prev; |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1369 | struct tcf_proto *tp, *tp_prev; |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1370 | int err; |
| 1371 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1372 | lockdep_assert_held(&block->cb_lock); |
| 1373 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1374 | for (chain = __tcf_get_next_chain(block, NULL); |
| 1375 | chain; |
| 1376 | chain_prev = chain, |
| 1377 | chain = __tcf_get_next_chain(block, chain), |
| 1378 | tcf_chain_put(chain_prev)) { |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 1379 | for (tp = __tcf_get_next_proto(chain, NULL); tp; |
| 1380 | tp_prev = tp, |
| 1381 | tp = __tcf_get_next_proto(chain, tp), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1382 | tcf_proto_put(tp_prev, true, NULL)) { |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1383 | if (tp->ops->reoffload) { |
| 1384 | err = tp->ops->reoffload(tp, add, cb, cb_priv, |
| 1385 | extack); |
| 1386 | if (err && add) |
| 1387 | goto err_playback_remove; |
| 1388 | } else if (add && offload_in_use) { |
| 1389 | err = -EOPNOTSUPP; |
| 1390 | NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); |
| 1391 | goto err_playback_remove; |
| 1392 | } |
| 1393 | } |
| 1394 | } |
| 1395 | |
| 1396 | return 0; |
| 1397 | |
| 1398 | err_playback_remove: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1399 | tcf_proto_put(tp, true, NULL); |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 1400 | tcf_chain_put(chain); |
John Hurley | 3263674 | 2018-06-25 14:30:10 -0700 | [diff] [blame] | 1401 | tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, |
| 1402 | extack); |
| 1403 | return err; |
| 1404 | } |
| 1405 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1406 | static int tcf_block_bind(struct tcf_block *block, |
| 1407 | struct flow_block_offload *bo) |
| 1408 | { |
| 1409 | struct flow_block_cb *block_cb, *next; |
| 1410 | int err, i = 0; |
| 1411 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1412 | lockdep_assert_held(&block->cb_lock); |
| 1413 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1414 | list_for_each_entry(block_cb, &bo->cb_list, list) { |
| 1415 | err = tcf_block_playback_offloads(block, block_cb->cb, |
| 1416 | block_cb->cb_priv, true, |
| 1417 | tcf_block_offload_in_use(block), |
| 1418 | bo->extack); |
| 1419 | if (err) |
| 1420 | goto err_unroll; |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame^] | 1421 | if (!bo->unlocked_driver_cb) |
| 1422 | block->lockeddevcnt++; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1423 | |
| 1424 | i++; |
| 1425 | } |
Pablo Neira Ayuso | 14bfb13 | 2019-07-19 18:20:16 +0200 | [diff] [blame] | 1426 | list_splice(&bo->cb_list, &block->flow_block.cb_list); |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1427 | |
| 1428 | return 0; |
| 1429 | |
| 1430 | err_unroll: |
| 1431 | list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { |
| 1432 | if (i-- > 0) { |
| 1433 | list_del(&block_cb->list); |
| 1434 | tcf_block_playback_offloads(block, block_cb->cb, |
| 1435 | block_cb->cb_priv, false, |
| 1436 | tcf_block_offload_in_use(block), |
| 1437 | NULL); |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame^] | 1438 | if (!bo->unlocked_driver_cb) |
| 1439 | block->lockeddevcnt--; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1440 | } |
| 1441 | flow_block_cb_free(block_cb); |
| 1442 | } |
| 1443 | |
| 1444 | return err; |
| 1445 | } |
| 1446 | |
| 1447 | static void tcf_block_unbind(struct tcf_block *block, |
| 1448 | struct flow_block_offload *bo) |
| 1449 | { |
| 1450 | struct flow_block_cb *block_cb, *next; |
| 1451 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 1452 | lockdep_assert_held(&block->cb_lock); |
| 1453 | |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1454 | list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { |
| 1455 | tcf_block_playback_offloads(block, block_cb->cb, |
| 1456 | block_cb->cb_priv, false, |
| 1457 | tcf_block_offload_in_use(block), |
| 1458 | NULL); |
| 1459 | list_del(&block_cb->list); |
| 1460 | flow_block_cb_free(block_cb); |
Vlad Buslov | c9f1447 | 2019-08-26 16:45:01 +0300 | [diff] [blame^] | 1461 | if (!bo->unlocked_driver_cb) |
| 1462 | block->lockeddevcnt--; |
Pablo Neira Ayuso | 59094b1 | 2019-07-09 22:55:45 +0200 | [diff] [blame] | 1463 | } |
| 1464 | } |
| 1465 | |
| 1466 | static int tcf_block_setup(struct tcf_block *block, |
| 1467 | struct flow_block_offload *bo) |
| 1468 | { |
| 1469 | int err; |
| 1470 | |
| 1471 | switch (bo->command) { |
| 1472 | case FLOW_BLOCK_BIND: |
| 1473 | err = tcf_block_bind(block, bo); |
| 1474 | break; |
| 1475 | case FLOW_BLOCK_UNBIND: |
| 1476 | err = 0; |
| 1477 | tcf_block_unbind(block, bo); |
| 1478 | break; |
| 1479 | default: |
| 1480 | WARN_ON_ONCE(1); |
| 1481 | err = -EOPNOTSUPP; |
| 1482 | } |
| 1483 | |
| 1484 | return err; |
| 1485 | } |
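
/* Hedged driver-side sketch of what populates bo->cb_list before
 * tcf_block_bind()/tcf_block_unbind() above consume it. The 'example_'
 * names are hypothetical stand-ins for a real driver; the flow_block_cb_*
 * helpers come from <net/flow_offload.h>.
 */
static int example_drv_setup_cb(enum tc_setup_type type, void *type_data,
				void *cb_priv)
{
	/* A real driver would translate the flow rule to hardware here. */
	return -EOPNOTSUPP;
}

static int example_drv_setup_block(struct flow_block_offload *bo, void *priv)
{
	struct flow_block_cb *block_cb;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_alloc(example_drv_setup_cb, priv,
					       priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		flow_block_cb_add(block_cb, bo);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(bo->block,
						example_drv_setup_cb, priv);
		if (!block_cb)
			return -ENOENT;
		flow_block_cb_remove(block_cb, bo);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}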
| 1486 | |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1487 | /* Main classifier routine: scans the classifier chain attached
| 1488 | * to this qdisc, (optionally) tests for the protocol, and asks
| 1489 | * the specific classifiers.
| 1490 | */ |
| 1491 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
| 1492 | struct tcf_result *res, bool compat_mode) |
| 1493 | { |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1494 | #ifdef CONFIG_NET_CLS_ACT |
| 1495 | const int max_reclassify_loop = 4; |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1496 | const struct tcf_proto *orig_tp = tp; |
| 1497 | const struct tcf_proto *first_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1498 | int limit = 0; |
| 1499 | |
| 1500 | reclassify: |
| 1501 | #endif |
| 1502 | for (; tp; tp = rcu_dereference_bh(tp->next)) { |
Cong Wang | cd0c4e7 | 2019-01-11 18:55:42 -0800 | [diff] [blame] | 1503 | __be16 protocol = tc_skb_protocol(skb); |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1504 | int err; |
| 1505 | |
| 1506 | if (tp->protocol != protocol && |
| 1507 | tp->protocol != htons(ETH_P_ALL)) |
| 1508 | continue; |
| 1509 | |
| 1510 | err = tp->classify(skb, tp, res); |
| 1511 | #ifdef CONFIG_NET_CLS_ACT |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1512 | if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1513 | first_tp = orig_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1514 | goto reset; |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1515 | } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1516 | first_tp = res->goto_tp; |
Jiri Pirko | db50514 | 2017-05-17 11:08:03 +0200 | [diff] [blame] | 1517 | goto reset; |
| 1518 | } |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1519 | #endif |
| 1520 | if (err >= 0) |
| 1521 | return err; |
| 1522 | } |
| 1523 | |
| 1524 | return TC_ACT_UNSPEC; /* signal: continue lookup */ |
| 1525 | #ifdef CONFIG_NET_CLS_ACT |
| 1526 | reset: |
| 1527 | if (unlikely(limit++ >= max_reclassify_loop)) { |
Jiri Pirko | 9d3aaff | 2018-01-17 11:46:47 +0100 | [diff] [blame] | 1528 | net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n", |
| 1529 | tp->chain->block->index, |
| 1530 | tp->prio & 0xffff, |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1531 | ntohs(tp->protocol)); |
| 1532 | return TC_ACT_SHOT; |
| 1533 | } |
| 1534 | |
Jiri Pirko | ee538dc | 2017-05-23 09:11:59 +0200 | [diff] [blame] | 1535 | tp = first_tp; |
Jiri Pirko | 87d8309 | 2017-05-17 11:07:54 +0200 | [diff] [blame] | 1536 | goto reclassify; |
| 1537 | #endif |
| 1538 | } |
| 1539 | EXPORT_SYMBOL(tcf_classify); |
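
/* Hedged sketch of a typical call site (the function and class fallback
 * are illustrative): a qdisc's enqueue path classifying an skb against its
 * filter list, following the common pattern of treating action verdicts as
 * "consumed or dropped" and a non-negative return as a match.
 */
static u32 example_classify(struct sk_buff *skb, struct tcf_proto *fl,
			    u32 default_class)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_SHOT:
		return 0;	/* packet consumed or dropped by an action */
	}
#endif
	if (result >= 0)
		return res.classid;	/* a classifier matched */

	return default_class;		/* TC_ACT_UNSPEC: no match */
}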
| 1540 | |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1541 | struct tcf_chain_info { |
| 1542 | struct tcf_proto __rcu **pprev; |
| 1543 | struct tcf_proto __rcu *next; |
| 1544 | }; |
| 1545 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1546 | static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain, |
| 1547 | struct tcf_chain_info *chain_info) |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1548 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1549 | return tcf_chain_dereference(*chain_info->pprev, chain); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1550 | } |
| 1551 | |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1552 | static int tcf_chain_tp_insert(struct tcf_chain *chain, |
| 1553 | struct tcf_chain_info *chain_info, |
| 1554 | struct tcf_proto *tp) |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1555 | { |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1556 | if (chain->flushing) |
| 1557 | return -EAGAIN; |
| 1558 | |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1559 | if (*chain_info->pprev == chain->filter_chain) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1560 | tcf_chain0_head_change(chain, tp); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1561 | tcf_proto_get(tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1562 | RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1563 | rcu_assign_pointer(*chain_info->pprev, tp); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1564 | |
| 1565 | return 0; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1566 | } |
| 1567 | |
| 1568 | static void tcf_chain_tp_remove(struct tcf_chain *chain, |
| 1569 | struct tcf_chain_info *chain_info, |
| 1570 | struct tcf_proto *tp) |
| 1571 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1572 | struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1573 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1574 | tcf_proto_mark_delete(tp); |
Jiri Pirko | c7eb7d7 | 2017-11-03 11:46:24 +0100 | [diff] [blame] | 1575 | if (tp == chain->filter_chain) |
Jiri Pirko | f71e0ca4 | 2018-07-23 09:23:05 +0200 | [diff] [blame] | 1576 | tcf_chain0_head_change(chain, next); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1577 | RCU_INIT_POINTER(*chain_info->pprev, next); |
| 1578 | } |
| 1579 | |
| 1580 | static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, |
| 1581 | struct tcf_chain_info *chain_info, |
| 1582 | u32 protocol, u32 prio, |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1583 | bool prio_allocate); |
| 1584 | |
| 1585 | /* Try to insert a new proto.
| 1586 | * If a proto with the specified priority already exists, free the new
| 1587 | * proto and return the existing one.
| 1588 | */
| 1589 | |
| 1590 | static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, |
| 1591 | struct tcf_proto *tp_new, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1592 | u32 protocol, u32 prio, |
| 1593 | bool rtnl_held) |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1594 | { |
| 1595 | struct tcf_chain_info chain_info; |
| 1596 | struct tcf_proto *tp; |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1597 | int err = 0; |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1598 | |
| 1599 | mutex_lock(&chain->filter_chain_lock); |
| 1600 | |
| 1601 | tp = tcf_chain_tp_find(chain, &chain_info, |
| 1602 | protocol, prio, false); |
| 1603 | if (!tp) |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1604 | err = tcf_chain_tp_insert(chain, &chain_info, tp_new); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1605 | mutex_unlock(&chain->filter_chain_lock); |
| 1606 | |
| 1607 | if (tp) { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1608 | tcf_proto_destroy(tp_new, rtnl_held, NULL); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1609 | tp_new = tp; |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1610 | } else if (err) { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1611 | tcf_proto_destroy(tp_new, rtnl_held, NULL); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1612 | tp_new = ERR_PTR(err); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1613 | } |
| 1614 | |
| 1615 | return tp_new; |
| 1616 | } |
| 1617 | |
| 1618 | static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1619 | struct tcf_proto *tp, bool rtnl_held, |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1620 | struct netlink_ext_ack *extack) |
| 1621 | { |
| 1622 | struct tcf_chain_info chain_info; |
| 1623 | struct tcf_proto *tp_iter; |
| 1624 | struct tcf_proto **pprev; |
| 1625 | struct tcf_proto *next; |
| 1626 | |
| 1627 | mutex_lock(&chain->filter_chain_lock); |
| 1628 | |
| 1629 | /* Atomically find and remove tp from chain. */ |
| 1630 | for (pprev = &chain->filter_chain; |
| 1631 | (tp_iter = tcf_chain_dereference(*pprev, chain)); |
| 1632 | pprev = &tp_iter->next) { |
| 1633 | if (tp_iter == tp) { |
| 1634 | chain_info.pprev = pprev; |
| 1635 | chain_info.next = tp_iter->next; |
| 1636 | WARN_ON(tp_iter->deleting); |
| 1637 | break; |
| 1638 | } |
| 1639 | } |
| 1640 | /* Verify that tp still exists and no new filters were inserted |
| 1641 | * concurrently. |
| 1642 | * Mark tp for deletion if it is empty. |
| 1643 | */ |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1644 | if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1645 | mutex_unlock(&chain->filter_chain_lock); |
| 1646 | return; |
| 1647 | } |
| 1648 | |
| 1649 | next = tcf_chain_dereference(chain_info.next, chain); |
| 1650 | if (tp == chain->filter_chain) |
| 1651 | tcf_chain0_head_change(chain, next); |
| 1652 | RCU_INIT_POINTER(*chain_info.pprev, next); |
| 1653 | mutex_unlock(&chain->filter_chain_lock); |
| 1654 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1655 | tcf_proto_put(tp, rtnl_held, extack); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1656 | } |
| 1657 | |
| 1658 | static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, |
| 1659 | struct tcf_chain_info *chain_info, |
| 1660 | u32 protocol, u32 prio, |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1661 | bool prio_allocate) |
| 1662 | { |
| 1663 | struct tcf_proto **pprev; |
| 1664 | struct tcf_proto *tp; |
| 1665 | |
| 1666 | /* Check the chain for the existence of a proto-tcf with this priority */
| 1667 | for (pprev = &chain->filter_chain; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1668 | (tp = tcf_chain_dereference(*pprev, chain)); |
| 1669 | pprev = &tp->next) { |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1670 | if (tp->prio >= prio) { |
| 1671 | if (tp->prio == prio) { |
| 1672 | if (prio_allocate || |
| 1673 | (tp->protocol != protocol && protocol)) |
| 1674 | return ERR_PTR(-EINVAL); |
| 1675 | } else { |
| 1676 | tp = NULL; |
| 1677 | } |
| 1678 | break; |
| 1679 | } |
| 1680 | } |
| 1681 | chain_info->pprev = pprev; |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1682 | if (tp) { |
| 1683 | chain_info->next = tp->next; |
| 1684 | tcf_proto_get(tp); |
| 1685 | } else { |
| 1686 | chain_info->next = NULL; |
| 1687 | } |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1688 | return tp; |
| 1689 | } |
| 1690 | |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1691 | static int tcf_fill_node(struct net *net, struct sk_buff *skb, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1692 | struct tcf_proto *tp, struct tcf_block *block, |
| 1693 | struct Qdisc *q, u32 parent, void *fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1694 | u32 portid, u32 seq, u16 flags, int event, |
| 1695 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1696 | { |
| 1697 | struct tcmsg *tcm; |
| 1698 | struct nlmsghdr *nlh; |
| 1699 | unsigned char *b = skb_tail_pointer(skb); |
| 1700 | |
| 1701 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); |
| 1702 | if (!nlh) |
| 1703 | goto out_nlmsg_trim; |
| 1704 | tcm = nlmsg_data(nlh); |
| 1705 | tcm->tcm_family = AF_UNSPEC; |
| 1706 | tcm->tcm__pad1 = 0; |
| 1707 | tcm->tcm__pad2 = 0; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1708 | if (q) { |
| 1709 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
| 1710 | tcm->tcm_parent = parent; |
| 1711 | } else { |
| 1712 | tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; |
| 1713 | tcm->tcm_block_index = block->index; |
| 1714 | } |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1715 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); |
| 1716 | if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) |
| 1717 | goto nla_put_failure; |
| 1718 | if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) |
| 1719 | goto nla_put_failure; |
| 1720 | if (!fh) { |
| 1721 | tcm->tcm_handle = 0; |
| 1722 | } else { |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1723 | if (tp->ops->dump && |
| 1724 | tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1725 | goto nla_put_failure; |
| 1726 | } |
| 1727 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
| 1728 | return skb->len; |
| 1729 | |
| 1730 | out_nlmsg_trim: |
| 1731 | nla_put_failure: |
| 1732 | nlmsg_trim(skb, b); |
| 1733 | return -1; |
| 1734 | } |
| 1735 | |
| 1736 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
| 1737 | struct nlmsghdr *n, struct tcf_proto *tp, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1738 | struct tcf_block *block, struct Qdisc *q, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1739 | u32 parent, void *fh, int event, bool unicast, |
| 1740 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1741 | { |
| 1742 | struct sk_buff *skb; |
| 1743 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1744 | int err = 0; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1745 | |
| 1746 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 1747 | if (!skb) |
| 1748 | return -ENOBUFS; |
| 1749 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1750 | if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1751 | n->nlmsg_seq, n->nlmsg_flags, event, |
| 1752 | rtnl_held) <= 0) { |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1753 | kfree_skb(skb); |
| 1754 | return -EINVAL; |
| 1755 | } |
| 1756 | |
| 1757 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1758 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 1759 | else |
| 1760 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 1761 | n->nlmsg_flags & NLM_F_ECHO); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1762 | |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1763 | if (err > 0) |
| 1764 | err = 0; |
| 1765 | return err; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1766 | } |
| 1767 | |
| 1768 | static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, |
| 1769 | struct nlmsghdr *n, struct tcf_proto *tp, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1770 | struct tcf_block *block, struct Qdisc *q, |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1771 | u32 parent, void *fh, bool unicast, bool *last, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1772 | bool rtnl_held, struct netlink_ext_ack *extack) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1773 | { |
| 1774 | struct sk_buff *skb; |
| 1775 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 1776 | int err; |
| 1777 | |
| 1778 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 1779 | if (!skb) |
| 1780 | return -ENOBUFS; |
| 1781 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1782 | if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1783 | n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, |
| 1784 | rtnl_held) <= 0) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1785 | NL_SET_ERR_MSG(extack, "Failed to build del event notification"); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1786 | kfree_skb(skb); |
| 1787 | return -EINVAL; |
| 1788 | } |
| 1789 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1790 | err = tp->ops->delete(tp, fh, last, rtnl_held, extack); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1791 | if (err) { |
| 1792 | kfree_skb(skb); |
| 1793 | return err; |
| 1794 | } |
| 1795 | |
| 1796 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1797 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 1798 | else |
| 1799 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 1800 | n->nlmsg_flags & NLM_F_ECHO); |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1801 | if (err < 0) |
| 1802 | NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 1803 | |
| 1804 | if (err > 0) |
| 1805 | err = 0; |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1806 | return err; |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1807 | } |
| 1808 | |
| 1809 | static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1810 | struct tcf_block *block, struct Qdisc *q, |
| 1811 | u32 parent, struct nlmsghdr *n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1812 | struct tcf_chain *chain, int event, |
| 1813 | bool rtnl_held) |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1814 | { |
| 1815 | struct tcf_proto *tp; |
| 1816 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1817 | for (tp = tcf_get_next_proto(chain, NULL, rtnl_held); |
| 1818 | tp; tp = tcf_get_next_proto(chain, tp, rtnl_held)) |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1819 | tfilter_notify(net, oskb, n, tp, block, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1820 | q, parent, NULL, event, false, rtnl_held); |
WANG Cong | 7120371 | 2017-08-07 15:26:50 -0700 | [diff] [blame] | 1821 | } |
| 1822 | |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 1823 | static void tfilter_put(struct tcf_proto *tp, void *fh) |
| 1824 | { |
| 1825 | if (tp->ops->put && fh) |
| 1826 | tp->ops->put(tp, fh); |
| 1827 | } |
| 1828 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1829 | static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
David Ahern | c21ef3e | 2017-04-16 09:48:24 -0700 | [diff] [blame] | 1830 | struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1831 | { |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 1832 | struct net *net = sock_net(skb->sk); |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 1833 | struct nlattr *tca[TCA_MAX + 1]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | struct tcmsg *t; |
| 1835 | u32 protocol; |
| 1836 | u32 prio; |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 1837 | bool prio_allocate; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1838 | u32 parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1839 | u32 chain_index; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 1840 | struct Qdisc *q = NULL; |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1841 | struct tcf_chain_info chain_info; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1842 | struct tcf_chain *chain = NULL; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 1843 | struct tcf_block *block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1844 | struct tcf_proto *tp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | unsigned long cl; |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 1846 | void *fh; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | int err; |
Daniel Borkmann | 628185c | 2016-12-21 18:04:11 +0100 | [diff] [blame] | 1848 | int tp_created; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 1849 | bool rtnl_held = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1850 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1851 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
Eric W. Biederman | dfc47ef | 2012-11-16 03:03:00 +0000 | [diff] [blame] | 1852 | return -EPERM; |
Hong zhi guo | de179c8 | 2013-03-25 17:36:33 +0000 | [diff] [blame] | 1853 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1854 | replay: |
Daniel Borkmann | 628185c | 2016-12-21 18:04:11 +0100 | [diff] [blame] | 1855 | tp_created = 0; |
| 1856 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 1857 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 1858 | rtm_tca_policy, extack); |
Hong zhi guo | de179c8 | 2013-03-25 17:36:33 +0000 | [diff] [blame] | 1859 | if (err < 0) |
| 1860 | return err; |
| 1861 | |
David S. Miller | 942b816 | 2012-06-26 21:48:50 -0700 | [diff] [blame] | 1862 | t = nlmsg_data(n); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | protocol = TC_H_MIN(t->tcm_info); |
| 1864 | prio = TC_H_MAJ(t->tcm_info); |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 1865 | prio_allocate = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1866 | parent = t->tcm_parent; |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 1867 | tp = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1868 | cl = 0; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 1869 | block = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1870 | |
| 1871 | if (prio == 0) { |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1872 | /* If no priority is provided by the user, |
| 1873 | * we allocate one. |
| 1874 | */ |
| 1875 | if (n->nlmsg_flags & NLM_F_CREATE) { |
| 1876 | prio = TC_H_MAKE(0x80000000U, 0U); |
| 1877 | prio_allocate = true; |
| 1878 | } else { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1879 | NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1880 | return -ENOENT; |
Daniel Borkmann | ea7f827 | 2016-06-10 23:10:22 +0200 | [diff] [blame] | 1881 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | } |
| 1883 | |
| 1884 | /* Find head of filter chain. */ |
| 1885 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 1886 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 1887 | if (err) |
| 1888 | return err; |
| 1889 | |
| 1890 | /* Take the rtnl mutex if rtnl_held was set to true on a previous iteration,
| 1891 | * the block is shared (no qdisc found), the qdisc is not unlocked, the
| 1892 | * classifier type is not specified, or the classifier is not unlocked.
| 1893 | */
| 1894 | if (rtnl_held || |
| 1895 | (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
| 1896 | !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) { |
| 1897 | rtnl_held = true; |
| 1898 | rtnl_lock(); |
| 1899 | } |
| 1900 | |
| 1901 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 1902 | if (err) |
| 1903 | goto errout; |
| 1904 | |
| 1905 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 1906 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1907 | if (IS_ERR(block)) { |
| 1908 | err = PTR_ERR(block); |
| 1909 | goto errout; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1910 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1911 | |
| 1912 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 1913 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1914 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1915 | err = -EINVAL; |
| 1916 | goto errout; |
| 1917 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1918 | chain = tcf_chain_get(block, chain_index, true); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 1919 | if (!chain) { |
Jiri Pirko | d5ed72a | 2018-08-27 20:58:43 +0200 | [diff] [blame] | 1920 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1921 | err = -ENOMEM; |
Daniel Borkmann | ea7f827 | 2016-06-10 23:10:22 +0200 | [diff] [blame] | 1922 | goto errout; |
| 1923 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1925 | mutex_lock(&chain->filter_chain_lock); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1926 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 1927 | prio, prio_allocate); |
| 1928 | if (IS_ERR(tp)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1929 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Jiri Pirko | 2190d1d | 2017-05-17 11:07:59 +0200 | [diff] [blame] | 1930 | err = PTR_ERR(tp); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1931 | goto errout_locked; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1932 | } |
| 1933 | |
| 1934 | if (tp == NULL) { |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1935 | struct tcf_proto *tp_new = NULL; |
| 1936 | |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1937 | if (chain->flushing) { |
| 1938 | err = -EAGAIN; |
| 1939 | goto errout_locked; |
| 1940 | } |
| 1941 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1942 | /* Proto-tcf does not exist, create new one */ |
| 1943 | |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1944 | if (tca[TCA_KIND] == NULL || !protocol) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1945 | NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1946 | err = -EINVAL; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1947 | goto errout_locked; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1948 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1950 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1951 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1952 | err = -ENOENT; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1953 | goto errout_locked; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1954 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 | |
Jiri Pirko | 9d36d9e | 2017-05-17 11:07:57 +0200 | [diff] [blame] | 1956 | if (prio_allocate) |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1957 | prio = tcf_auto_prio(tcf_chain_tp_prev(chain, |
| 1958 | &chain_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1959 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1960 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1961 | tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1962 | protocol, prio, chain, rtnl_held, |
| 1963 | extack); |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1964 | if (IS_ERR(tp_new)) { |
| 1965 | err = PTR_ERR(tp_new); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1966 | goto errout_tp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1967 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1968 | |
Minoru Usui | 12186be | 2009-06-02 02:17:34 -0700 | [diff] [blame] | 1969 | tp_created = 1; |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 1970 | tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, |
| 1971 | rtnl_held); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 1972 | if (IS_ERR(tp)) { |
| 1973 | err = PTR_ERR(tp); |
| 1974 | goto errout_tp; |
| 1975 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1976 | } else { |
| 1977 | mutex_unlock(&chain->filter_chain_lock); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1978 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1979 | |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 1980 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 1981 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 1982 | err = -EINVAL; |
| 1983 | goto errout; |
| 1984 | } |
| 1985 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1986 | fh = tp->ops->get(tp, t->tcm_handle); |
| 1987 | |
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 1988 | if (!fh) { |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1989 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
Alexander Aring | c35a4ac | 2018-01-18 11:20:50 -0500 | [diff] [blame] | 1990 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1991 | err = -ENOENT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1992 | goto errout; |
Jiri Pirko | 6bb16e7 | 2017-02-09 14:38:58 +0100 | [diff] [blame] | 1993 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1994 | } else if (n->nlmsg_flags & NLM_F_EXCL) { |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 1995 | tfilter_put(tp, fh); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 1996 | NL_SET_ERR_MSG(extack, "Filter already exists"); |
| 1997 | err = -EEXIST; |
| 1998 | goto errout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | } |
| 2000 | |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2001 | if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { |
| 2002 | NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); |
| 2003 | err = -EINVAL; |
| 2004 | goto errout; |
| 2005 | } |
| 2006 | |
Cong Wang | 2f7ef2f | 2014-04-25 13:54:06 -0700 | [diff] [blame] | 2007 | err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, |
Alexander Aring | 7306db3 | 2018-01-18 11:20:51 -0500 | [diff] [blame] | 2008 | n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2009 | rtnl_held, extack); |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2010 | if (err == 0) { |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2011 | tfilter_notify(net, skb, n, tp, block, q, parent, fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2012 | RTM_NEWTFILTER, false, rtnl_held); |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2013 | tfilter_put(tp, fh); |
Vlad Buslov | 503d81d | 2019-07-21 17:44:12 +0300 | [diff] [blame] | 2014 | /* q pointer is NULL for shared blocks */ |
| 2015 | if (q) |
| 2016 | q->flags &= ~TCQ_F_CAN_BYPASS; |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2017 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | |
| 2019 | errout: |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2020 | if (err && tp_created) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2021 | tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); |
Vlad Buslov | 726d0612 | 2019-02-11 10:55:42 +0200 | [diff] [blame] | 2022 | errout_tp: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2023 | if (chain) { |
| 2024 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2025 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2026 | if (!tp_created) |
| 2027 | tcf_chain_put(chain); |
| 2028 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2029 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2030 | |
| 2031 | if (rtnl_held) |
| 2032 | rtnl_unlock(); |
| 2033 | |
| 2034 | if (err == -EAGAIN) { |
| 2035 | /* Take rtnl lock in case EAGAIN is caused by concurrent flush |
| 2036 | * of target chain. |
| 2037 | */ |
| 2038 | rtnl_held = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | /* Replay the request. */ |
| 2040 | goto replay; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2041 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | return err; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2043 | |
| 2044 | errout_locked: |
| 2045 | mutex_unlock(&chain->filter_chain_lock); |
| 2046 | goto errout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | } |
| 2048 | |
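/* Handle RTM_DELTFILTER (roughly what iproute2's "tc filter del" sends):
 * delete a single filter from its chain, or flush the whole chain when
 * the priority is zero.
 */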
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2049 | static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| 2050 | struct netlink_ext_ack *extack) |
| 2051 | { |
| 2052 | struct net *net = sock_net(skb->sk); |
| 2053 | struct nlattr *tca[TCA_MAX + 1]; |
| 2054 | struct tcmsg *t; |
| 2055 | u32 protocol; |
| 2056 | u32 prio; |
| 2057 | u32 parent; |
| 2058 | u32 chain_index; |
| 2059 | struct Qdisc *q = NULL; |
| 2060 | struct tcf_chain_info chain_info; |
| 2061 | struct tcf_chain *chain = NULL; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2062 | struct tcf_block *block = NULL; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2063 | struct tcf_proto *tp = NULL; |
| 2064 | unsigned long cl = 0; |
| 2065 | void *fh = NULL; |
| 2066 | int err; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2067 | bool rtnl_held = false; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2068 | |
| 2069 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
| 2070 | return -EPERM; |
| 2071 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2072 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2073 | rtm_tca_policy, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2074 | if (err < 0) |
| 2075 | return err; |
| 2076 | |
| 2077 | t = nlmsg_data(n); |
| 2078 | protocol = TC_H_MIN(t->tcm_info); |
| 2079 | prio = TC_H_MAJ(t->tcm_info); |
| 2080 | parent = t->tcm_parent; |
| 2081 | |
| 2082 | if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { |
| 2083 | NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); |
| 2084 | return -ENOENT; |
| 2085 | } |
| 2086 | |
| 2087 | /* Find head of filter chain. */ |
| 2088 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2089 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2090 | if (err) |
| 2091 | return err; |
| 2092 | |
 | 2093 | /* Take rtnl mutex if flushing the whole chain, the block is shared |
 | 2094 | * (no qdisc found), the qdisc is not unlocked, the classifier type is |
 | 2095 | * not specified, or the classifier is not unlocked. |
| 2096 | */ |
| 2097 | if (!prio || |
| 2098 | (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
| 2099 | !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) { |
| 2100 | rtnl_held = true; |
| 2101 | rtnl_lock(); |
| 2102 | } |
| 2103 | |
| 2104 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2105 | if (err) |
| 2106 | goto errout; |
| 2107 | |
| 2108 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2109 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2110 | if (IS_ERR(block)) { |
| 2111 | err = PTR_ERR(block); |
| 2112 | goto errout; |
| 2113 | } |
| 2114 | |
| 2115 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2116 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2117 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
| 2118 | err = -EINVAL; |
| 2119 | goto errout; |
| 2120 | } |
| 2121 | chain = tcf_chain_get(block, chain_index, false); |
| 2122 | if (!chain) { |
Jiri Pirko | 5ca8a25 | 2018-08-03 11:08:47 +0200 | [diff] [blame] | 2123 | /* User requested flush on non-existent chain. Nothing to do, |
| 2124 | * so just return success. |
| 2125 | */ |
| 2126 | if (prio == 0) { |
| 2127 | err = 0; |
| 2128 | goto errout; |
| 2129 | } |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2130 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
Jiri Pirko | b7b4247 | 2018-08-27 20:58:44 +0200 | [diff] [blame] | 2131 | err = -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2132 | goto errout; |
| 2133 | } |
| 2134 | |
| 2135 | if (prio == 0) { |
| 2136 | tfilter_notify_chain(net, skb, block, q, parent, n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2137 | chain, RTM_DELTFILTER, rtnl_held); |
| 2138 | tcf_chain_flush(chain, rtnl_held); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2139 | err = 0; |
| 2140 | goto errout; |
| 2141 | } |
| 2142 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2143 | mutex_lock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2144 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2145 | prio, false); |
| 2146 | if (!tp || IS_ERR(tp)) { |
| 2147 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Vlad Buslov | 0e39903 | 2018-06-04 18:32:23 +0300 | [diff] [blame] | 2148 | err = tp ? PTR_ERR(tp) : -ENOENT; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2149 | goto errout_locked; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2150 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2151 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2152 | err = -EINVAL; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2153 | goto errout_locked; |
| 2154 | } else if (t->tcm_handle == 0) { |
| 2155 | tcf_chain_tp_remove(chain, &chain_info, tp); |
| 2156 | mutex_unlock(&chain->filter_chain_lock); |
| 2157 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2158 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2159 | tfilter_notify(net, skb, n, tp, block, q, parent, fh, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2160 | RTM_DELTFILTER, false, rtnl_held); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2161 | err = 0; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2162 | goto errout; |
| 2163 | } |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2164 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2165 | |
| 2166 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2167 | |
| 2168 | if (!fh) { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2169 | NL_SET_ERR_MSG(extack, "Specified filter handle not found"); |
| 2170 | err = -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2171 | } else { |
| 2172 | bool last; |
| 2173 | |
| 2174 | err = tfilter_del_notify(net, skb, n, tp, block, |
| 2175 | q, parent, fh, false, &last, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2176 | rtnl_held, extack); |
| 2177 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2178 | if (err) |
| 2179 | goto errout; |
Vlad Buslov | 8b64678 | 2019-02-11 10:55:41 +0200 | [diff] [blame] | 2180 | if (last) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2181 | tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2182 | } |
| 2183 | |
| 2184 | errout: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2185 | if (chain) { |
| 2186 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2187 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2188 | tcf_chain_put(chain); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2189 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2190 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2191 | |
| 2192 | if (rtnl_held) |
| 2193 | rtnl_unlock(); |
| 2194 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2195 | return err; |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2196 | |
| 2197 | errout_locked: |
| 2198 | mutex_unlock(&chain->filter_chain_lock); |
| 2199 | goto errout; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2200 | } |
| 2201 | |
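/* Handle RTM_GETTFILTER: look up one filter and unicast it back to the
 * requester via tfilter_notify().
 */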
| 2202 | static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| 2203 | struct netlink_ext_ack *extack) |
| 2204 | { |
| 2205 | struct net *net = sock_net(skb->sk); |
| 2206 | struct nlattr *tca[TCA_MAX + 1]; |
| 2207 | struct tcmsg *t; |
| 2208 | u32 protocol; |
| 2209 | u32 prio; |
| 2210 | u32 parent; |
| 2211 | u32 chain_index; |
| 2212 | struct Qdisc *q = NULL; |
| 2213 | struct tcf_chain_info chain_info; |
| 2214 | struct tcf_chain *chain = NULL; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2215 | struct tcf_block *block = NULL; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2216 | struct tcf_proto *tp = NULL; |
| 2217 | unsigned long cl = 0; |
| 2218 | void *fh = NULL; |
| 2219 | int err; |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2220 | bool rtnl_held = false; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2221 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2222 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2223 | rtm_tca_policy, extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2224 | if (err < 0) |
| 2225 | return err; |
| 2226 | |
| 2227 | t = nlmsg_data(n); |
| 2228 | protocol = TC_H_MIN(t->tcm_info); |
| 2229 | prio = TC_H_MAJ(t->tcm_info); |
| 2230 | parent = t->tcm_parent; |
| 2231 | |
| 2232 | if (prio == 0) { |
| 2233 | NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); |
| 2234 | return -ENOENT; |
| 2235 | } |
| 2236 | |
| 2237 | /* Find head of filter chain. */ |
| 2238 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2239 | err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); |
| 2240 | if (err) |
| 2241 | return err; |
| 2242 | |
 | 2243 | /* Take rtnl mutex if the block is shared (no qdisc found), the qdisc |
 | 2244 | * is not unlocked, the classifier type is not specified, or the |
 | 2245 | * classifier is not unlocked. |
| 2246 | */ |
| 2247 | if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || |
| 2248 | !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) { |
| 2249 | rtnl_held = true; |
| 2250 | rtnl_lock(); |
| 2251 | } |
| 2252 | |
| 2253 | err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); |
| 2254 | if (err) |
| 2255 | goto errout; |
| 2256 | |
| 2257 | block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, |
| 2258 | extack); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2259 | if (IS_ERR(block)) { |
| 2260 | err = PTR_ERR(block); |
| 2261 | goto errout; |
| 2262 | } |
| 2263 | |
| 2264 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2265 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2266 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
| 2267 | err = -EINVAL; |
| 2268 | goto errout; |
| 2269 | } |
| 2270 | chain = tcf_chain_get(block, chain_index, false); |
| 2271 | if (!chain) { |
| 2272 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
| 2273 | err = -EINVAL; |
| 2274 | goto errout; |
| 2275 | } |
| 2276 | |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2277 | mutex_lock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2278 | tp = tcf_chain_tp_find(chain, &chain_info, protocol, |
| 2279 | prio, false); |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 2280 | mutex_unlock(&chain->filter_chain_lock); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2281 | if (!tp || IS_ERR(tp)) { |
| 2282 | NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); |
Vlad Buslov | 0e39903 | 2018-06-04 18:32:23 +0300 | [diff] [blame] | 2283 | err = tp ? PTR_ERR(tp) : -ENOENT; |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2284 | goto errout; |
| 2285 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { |
| 2286 | NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); |
| 2287 | err = -EINVAL; |
| 2288 | goto errout; |
| 2289 | } |
| 2290 | |
| 2291 | fh = tp->ops->get(tp, t->tcm_handle); |
| 2292 | |
| 2293 | if (!fh) { |
| 2294 | NL_SET_ERR_MSG(extack, "Specified filter handle not found"); |
| 2295 | err = -ENOENT; |
| 2296 | } else { |
| 2297 | err = tfilter_notify(net, skb, n, tp, block, q, parent, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2298 | fh, RTM_NEWTFILTER, true, rtnl_held); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2299 | if (err < 0) |
| 2300 | NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); |
| 2301 | } |
| 2302 | |
Vlad Buslov | 7d5509f | 2019-02-11 10:55:44 +0200 | [diff] [blame] | 2303 | tfilter_put(tp, fh); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2304 | errout: |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2305 | if (chain) { |
| 2306 | if (tp && !IS_ERR(tp)) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2307 | tcf_proto_put(tp, rtnl_held, NULL); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2308 | tcf_chain_put(chain); |
Vlad Buslov | 4dbfa76 | 2019-02-11 10:55:39 +0200 | [diff] [blame] | 2309 | } |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2310 | tcf_block_release(q, block, rtnl_held); |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 2311 | |
| 2312 | if (rtnl_held) |
| 2313 | rtnl_unlock(); |
| 2314 | |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 2315 | return err; |
| 2316 | } |
| 2317 | |
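/* Walker state used while dumping every filter of a tcf_proto into a
 * netlink dump skb.
 */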
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2318 | struct tcf_dump_args { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2319 | struct tcf_walker w; |
| 2320 | struct sk_buff *skb; |
| 2321 | struct netlink_callback *cb; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2322 | struct tcf_block *block; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2323 | struct Qdisc *q; |
| 2324 | u32 parent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2325 | }; |
| 2326 | |
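/* Walker callback: emit a single filter node into the dump skb. */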
WANG Cong | 8113c09 | 2017-08-04 21:31:43 -0700 | [diff] [blame] | 2327 | static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2328 | { |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2329 | struct tcf_dump_args *a = (void *)arg; |
WANG Cong | 832d1d5 | 2014-01-09 16:14:01 -0800 | [diff] [blame] | 2330 | struct net *net = sock_net(a->skb->sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2331 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2332 | return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2333 | n, NETLINK_CB(a->cb->skb).portid, |
Jamal Hadi Salim | 5a7a555 | 2016-09-18 08:45:33 -0400 | [diff] [blame] | 2334 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2335 | RTM_NEWTFILTER, true); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | } |
| 2337 | |
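/* Dump the classifiers on one chain that match the prio/protocol encoded
 * in tcm_info. Returns false when the dump could not be completed (e.g.
 * the skb filled up), so it can be resumed later from cb->args.
 */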
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2338 | static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, |
| 2339 | struct sk_buff *skb, struct netlink_callback *cb, |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2340 | long index_start, long *p_index) |
| 2341 | { |
| 2342 | struct net *net = sock_net(skb->sk); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2343 | struct tcf_block *block = chain->block; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2344 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2345 | struct tcf_proto *tp, *tp_prev; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2346 | struct tcf_dump_args arg; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2347 | |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2348 | for (tp = __tcf_get_next_proto(chain, NULL); |
| 2349 | tp; |
| 2350 | tp_prev = tp, |
| 2351 | tp = __tcf_get_next_proto(chain, tp), |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2352 | tcf_proto_put(tp_prev, true, NULL), |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2353 | (*p_index)++) { |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2354 | if (*p_index < index_start) |
| 2355 | continue; |
| 2356 | if (TC_H_MAJ(tcm->tcm_info) && |
| 2357 | TC_H_MAJ(tcm->tcm_info) != tp->prio) |
| 2358 | continue; |
| 2359 | if (TC_H_MIN(tcm->tcm_info) && |
| 2360 | TC_H_MIN(tcm->tcm_info) != tp->protocol) |
| 2361 | continue; |
| 2362 | if (*p_index > index_start) |
| 2363 | memset(&cb->args[1], 0, |
| 2364 | sizeof(cb->args) - sizeof(cb->args[0])); |
| 2365 | if (cb->args[1] == 0) { |
YueHaibing | 5318918 | 2018-07-17 20:58:14 +0800 | [diff] [blame] | 2366 | if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2367 | NETLINK_CB(cb->skb).portid, |
| 2368 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2369 | RTM_NEWTFILTER, true) <= 0) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2370 | goto errout; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2371 | cb->args[1] = 1; |
| 2372 | } |
| 2373 | if (!tp->ops->walk) |
| 2374 | continue; |
| 2375 | arg.w.fn = tcf_node_dump; |
| 2376 | arg.skb = skb; |
| 2377 | arg.cb = cb; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2378 | arg.block = block; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2379 | arg.q = q; |
| 2380 | arg.parent = parent; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2381 | arg.w.stop = 0; |
| 2382 | arg.w.skip = cb->args[1] - 1; |
| 2383 | arg.w.count = 0; |
Vlad Buslov | 01683a1 | 2018-07-09 13:29:11 +0300 | [diff] [blame] | 2384 | arg.w.cookie = cb->args[2]; |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2385 | tp->ops->walk(tp, &arg.w, true); |
Vlad Buslov | 01683a1 | 2018-07-09 13:29:11 +0300 | [diff] [blame] | 2386 | cb->args[2] = arg.w.cookie; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2387 | cb->args[1] = arg.w.count + 1; |
| 2388 | if (arg.w.stop) |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2389 | goto errout; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2390 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2391 | return true; |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2392 | |
| 2393 | errout: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2394 | tcf_proto_put(tp, true, NULL); |
Vlad Buslov | fe2923a | 2019-02-11 10:55:40 +0200 | [diff] [blame] | 2395 | return false; |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2396 | } |
| 2397 | |
Eric Dumazet | bd27a87 | 2009-11-05 20:57:26 -0800 | [diff] [blame] | 2398 | /* called with RTNL */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2399 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
| 2400 | { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2401 | struct tcf_chain *chain, *chain_prev; |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 2402 | struct net *net = sock_net(skb->sk); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2403 | struct nlattr *tca[TCA_MAX + 1]; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2404 | struct Qdisc *q = NULL; |
Jiri Pirko | 6529eab | 2017-05-17 11:07:55 +0200 | [diff] [blame] | 2405 | struct tcf_block *block; |
David S. Miller | 942b816 | 2012-06-26 21:48:50 -0700 | [diff] [blame] | 2406 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2407 | long index_start; |
| 2408 | long index; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2409 | u32 parent; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2410 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2411 | |
Hong zhi guo | 573ce26 | 2013-03-27 06:47:04 +0000 | [diff] [blame] | 2412 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2413 | return skb->len; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2414 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2415 | err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, |
| 2416 | NULL, cb->extack); |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2417 | if (err) |
| 2418 | return err; |
| 2419 | |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2420 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2421 | block = tcf_block_refcnt_get(net, tcm->tcm_block_index); |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2422 | if (!block) |
WANG Cong | 143976c | 2017-08-24 16:51:29 -0700 | [diff] [blame] | 2423 | goto out; |
Jiri Pirko | d680b35 | 2018-01-18 16:14:49 +0100 | [diff] [blame] | 2424 | /* If we work with block index, q is NULL and parent value |
| 2425 | * will never be used in the following code. The check |
 | 2426 | * in tcf_fill_node prevents it. However, the compiler does not |
| 2427 | * see that far, so set parent to zero to silence the warning |
| 2428 | * about parent being uninitialized. |
| 2429 | */ |
| 2430 | parent = 0; |
Jiri Pirko | 7960d1d | 2018-01-17 11:46:51 +0100 | [diff] [blame] | 2431 | } else { |
| 2432 | const struct Qdisc_class_ops *cops; |
| 2433 | struct net_device *dev; |
| 2434 | unsigned long cl = 0; |
| 2435 | |
| 2436 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
| 2437 | if (!dev) |
| 2438 | return skb->len; |
| 2439 | |
| 2440 | parent = tcm->tcm_parent; |
| 2441 | if (!parent) { |
| 2442 | q = dev->qdisc; |
| 2443 | parent = q->handle; |
| 2444 | } else { |
| 2445 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
| 2446 | } |
| 2447 | if (!q) |
| 2448 | goto out; |
| 2449 | cops = q->ops->cl_ops; |
| 2450 | if (!cops) |
| 2451 | goto out; |
| 2452 | if (!cops->tcf_block) |
| 2453 | goto out; |
| 2454 | if (TC_H_MIN(tcm->tcm_parent)) { |
| 2455 | cl = cops->find(q, tcm->tcm_parent); |
| 2456 | if (cl == 0) |
| 2457 | goto out; |
| 2458 | } |
| 2459 | block = cops->tcf_block(q, cl, NULL); |
| 2460 | if (!block) |
| 2461 | goto out; |
| 2462 | if (tcf_block_shared(block)) |
| 2463 | q = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2464 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2465 | |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2466 | index_start = cb->args[0]; |
| 2467 | index = 0; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2468 | |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2469 | for (chain = __tcf_get_next_chain(block, NULL); |
| 2470 | chain; |
| 2471 | chain_prev = chain, |
| 2472 | chain = __tcf_get_next_chain(block, chain), |
| 2473 | tcf_chain_put(chain_prev)) { |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2474 | if (tca[TCA_CHAIN] && |
| 2475 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) |
| 2476 | continue; |
Jiri Pirko | a10fa20 | 2017-10-13 14:01:05 +0200 | [diff] [blame] | 2477 | if (!tcf_chain_dump(chain, q, parent, skb, cb, |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2478 | index_start, &index)) { |
Vlad Buslov | bbf7383 | 2019-02-11 10:55:36 +0200 | [diff] [blame] | 2479 | tcf_chain_put(chain); |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2480 | err = -EMSGSIZE; |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2481 | break; |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2482 | } |
Jiri Pirko | 5bc1701 | 2017-05-17 11:08:01 +0200 | [diff] [blame] | 2483 | } |
| 2484 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2485 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2486 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | acb31fa | 2017-05-17 11:08:00 +0200 | [diff] [blame] | 2487 | cb->args[0] = index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2489 | out: |
Roman Kapl | 5ae437a | 2018-02-19 21:32:51 +0100 | [diff] [blame] | 2490 | /* If we did no progress, the error (EMSGSIZE) is real */ |
| 2491 | if (skb->len == 0 && err) |
| 2492 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | return skb->len; |
| 2494 | } |
| 2495 | |
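/* Fill a netlink message describing a chain: owning qdisc or block index,
 * the chain index and, if a template is set, its kind and template dump.
 */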
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2496 | static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, |
| 2497 | void *tmplt_priv, u32 chain_index, |
| 2498 | struct net *net, struct sk_buff *skb, |
| 2499 | struct tcf_block *block, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2500 | u32 portid, u32 seq, u16 flags, int event) |
| 2501 | { |
| 2502 | unsigned char *b = skb_tail_pointer(skb); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2503 | const struct tcf_proto_ops *ops; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2504 | struct nlmsghdr *nlh; |
| 2505 | struct tcmsg *tcm; |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2506 | void *priv; |
| 2507 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2508 | ops = tmplt_ops; |
| 2509 | priv = tmplt_priv; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2510 | |
| 2511 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); |
| 2512 | if (!nlh) |
| 2513 | goto out_nlmsg_trim; |
| 2514 | tcm = nlmsg_data(nlh); |
| 2515 | tcm->tcm_family = AF_UNSPEC; |
| 2516 | tcm->tcm__pad1 = 0; |
| 2517 | tcm->tcm__pad2 = 0; |
| 2518 | tcm->tcm_handle = 0; |
| 2519 | if (block->q) { |
| 2520 | tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; |
| 2521 | tcm->tcm_parent = block->q->handle; |
| 2522 | } else { |
| 2523 | tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; |
| 2524 | tcm->tcm_block_index = block->index; |
| 2525 | } |
| 2526 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2527 | if (nla_put_u32(skb, TCA_CHAIN, chain_index)) |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2528 | goto nla_put_failure; |
| 2529 | |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2530 | if (ops) { |
| 2531 | if (nla_put_string(skb, TCA_KIND, ops->kind)) |
| 2532 | goto nla_put_failure; |
| 2533 | if (ops->tmplt_dump(skb, net, priv) < 0) |
| 2534 | goto nla_put_failure; |
| 2535 | } |
| 2536 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2537 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
| 2538 | return skb->len; |
| 2539 | |
| 2540 | out_nlmsg_trim: |
| 2541 | nla_put_failure: |
| 2542 | nlmsg_trim(skb, b); |
| 2543 | return -EMSGSIZE; |
| 2544 | } |
| 2545 | |
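/* Notify userspace about a chain event, either as a unicast reply or as
 * an RTNLGRP_TC multicast.
 */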
| 2546 | static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, |
| 2547 | u32 seq, u16 flags, int event, bool unicast) |
| 2548 | { |
| 2549 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 2550 | struct tcf_block *block = chain->block; |
| 2551 | struct net *net = block->net; |
| 2552 | struct sk_buff *skb; |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2553 | int err = 0; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2554 | |
| 2555 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 2556 | if (!skb) |
| 2557 | return -ENOBUFS; |
| 2558 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2559 | if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, |
| 2560 | chain->index, net, skb, block, portid, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2561 | seq, flags, event) <= 0) { |
| 2562 | kfree_skb(skb); |
| 2563 | return -EINVAL; |
| 2564 | } |
| 2565 | |
| 2566 | if (unicast) |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2567 | err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 2568 | else |
| 2569 | err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, |
| 2570 | flags & NLM_F_ECHO); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2571 | |
Zhike Wang | 5b5f99b | 2019-03-11 03:15:54 -0700 | [diff] [blame] | 2572 | if (err > 0) |
| 2573 | err = 0; |
| 2574 | return err; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2575 | } |
| 2576 | |
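/* Send an RTM_DELCHAIN notification built from explicitly passed template
 * ops/priv and chain index instead of a chain pointer, so it does not
 * depend on the chain object still being around.
 */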
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2577 | static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, |
| 2578 | void *tmplt_priv, u32 chain_index, |
| 2579 | struct tcf_block *block, struct sk_buff *oskb, |
| 2580 | u32 seq, u16 flags, bool unicast) |
| 2581 | { |
| 2582 | u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; |
| 2583 | struct net *net = block->net; |
| 2584 | struct sk_buff *skb; |
| 2585 | |
| 2586 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
| 2587 | if (!skb) |
| 2588 | return -ENOBUFS; |
| 2589 | |
| 2590 | if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, |
| 2591 | block, portid, seq, flags, RTM_DELCHAIN) <= 0) { |
| 2592 | kfree_skb(skb); |
| 2593 | return -EINVAL; |
| 2594 | } |
| 2595 | |
| 2596 | if (unicast) |
| 2597 | return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); |
| 2598 | |
| 2599 | return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); |
| 2600 | } |
| 2601 | |
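/* Attach a filter template to the chain: look up the classifier ops named
 * by TCA_KIND and let them create the template private data.
 */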
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2602 | static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, |
| 2603 | struct nlattr **tca, |
| 2604 | struct netlink_ext_ack *extack) |
| 2605 | { |
| 2606 | const struct tcf_proto_ops *ops; |
| 2607 | void *tmplt_priv; |
| 2608 | |
| 2609 | /* If kind is not set, user did not specify template. */ |
| 2610 | if (!tca[TCA_KIND]) |
| 2611 | return 0; |
| 2612 | |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2613 | ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2614 | if (IS_ERR(ops)) |
| 2615 | return PTR_ERR(ops); |
| 2616 | if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { |
| 2617 | NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); |
| 2618 | return -EOPNOTSUPP; |
| 2619 | } |
| 2620 | |
| 2621 | tmplt_priv = ops->tmplt_create(net, chain, tca, extack); |
| 2622 | if (IS_ERR(tmplt_priv)) { |
| 2623 | module_put(ops->owner); |
| 2624 | return PTR_ERR(tmplt_priv); |
| 2625 | } |
| 2626 | chain->tmplt_ops = ops; |
| 2627 | chain->tmplt_priv = tmplt_priv; |
| 2628 | return 0; |
| 2629 | } |
| 2630 | |
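/* Destroy the chain template, if any, and drop the module reference taken
 * when it was created.
 */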
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2631 | static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, |
| 2632 | void *tmplt_priv) |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2633 | { |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2634 | /* If template ops are not set, there is no template to destroy. */ |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2635 | if (!tmplt_ops) |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2636 | return; |
| 2637 | |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2638 | tmplt_ops->tmplt_destroy(tmplt_priv); |
| 2639 | module_put(tmplt_ops->owner); |
Jiri Pirko | 9f407f1 | 2018-07-23 09:23:07 +0200 | [diff] [blame] | 2640 | } |
| 2641 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2642 | /* Add/delete/get a chain */ |
| 2643 | |
| 2644 | static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, |
| 2645 | struct netlink_ext_ack *extack) |
| 2646 | { |
| 2647 | struct net *net = sock_net(skb->sk); |
| 2648 | struct nlattr *tca[TCA_MAX + 1]; |
| 2649 | struct tcmsg *t; |
| 2650 | u32 parent; |
| 2651 | u32 chain_index; |
| 2652 | struct Qdisc *q = NULL; |
| 2653 | struct tcf_chain *chain = NULL; |
| 2654 | struct tcf_block *block; |
| 2655 | unsigned long cl; |
| 2656 | int err; |
| 2657 | |
| 2658 | if (n->nlmsg_type != RTM_GETCHAIN && |
| 2659 | !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
| 2660 | return -EPERM; |
| 2661 | |
| 2662 | replay: |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2663 | err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, |
| 2664 | rtm_tca_policy, extack); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2665 | if (err < 0) |
| 2666 | return err; |
| 2667 | |
| 2668 | t = nlmsg_data(n); |
| 2669 | parent = t->tcm_parent; |
| 2670 | cl = 0; |
| 2671 | |
| 2672 | block = tcf_block_find(net, &q, &parent, &cl, |
| 2673 | t->tcm_ifindex, t->tcm_block_index, extack); |
| 2674 | if (IS_ERR(block)) |
| 2675 | return PTR_ERR(block); |
| 2676 | |
| 2677 | chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; |
| 2678 | if (chain_index > TC_ACT_EXT_VAL_MASK) { |
| 2679 | NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2680 | err = -EINVAL; |
| 2681 | goto errout_block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2682 | } |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2683 | |
| 2684 | mutex_lock(&block->lock); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2685 | chain = tcf_chain_lookup(block, chain_index); |
| 2686 | if (n->nlmsg_type == RTM_NEWCHAIN) { |
| 2687 | if (chain) { |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2688 | if (tcf_chain_held_by_acts_only(chain)) { |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2689 | /* The chain exists only because there is |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2690 | * some action referencing it. |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2691 | */ |
| 2692 | tcf_chain_hold(chain); |
| 2693 | } else { |
| 2694 | NL_SET_ERR_MSG(extack, "Filter chain already exists"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2695 | err = -EEXIST; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2696 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2697 | } |
| 2698 | } else { |
| 2699 | if (!(n->nlmsg_flags & NLM_F_CREATE)) { |
| 2700 | NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2701 | err = -ENOENT; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2702 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2703 | } |
| 2704 | chain = tcf_chain_create(block, chain_index); |
| 2705 | if (!chain) { |
| 2706 | NL_SET_ERR_MSG(extack, "Failed to create filter chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2707 | err = -ENOMEM; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2708 | goto errout_block_locked; |
Jiri Pirko | 1f3ed38 | 2018-07-27 09:45:05 +0200 | [diff] [blame] | 2709 | } |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2710 | } |
| 2711 | } else { |
Jiri Pirko | 3d32f4c | 2018-08-01 12:36:55 +0200 | [diff] [blame] | 2712 | if (!chain || tcf_chain_held_by_acts_only(chain)) { |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2713 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2714 | err = -EINVAL; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2715 | goto errout_block_locked; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2716 | } |
| 2717 | tcf_chain_hold(chain); |
| 2718 | } |
| 2719 | |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2720 | if (n->nlmsg_type == RTM_NEWCHAIN) { |
| 2721 | /* Modifying chain requires holding parent block lock. In case |
| 2722 | * the chain was successfully added, take a reference to the |
| 2723 | * chain. This ensures that an empty chain does not disappear at |
| 2724 | * the end of this function. |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2725 | */ |
| 2726 | tcf_chain_hold(chain); |
| 2727 | chain->explicitly_created = true; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2728 | } |
| 2729 | mutex_unlock(&block->lock); |
| 2730 | |
| 2731 | switch (n->nlmsg_type) { |
| 2732 | case RTM_NEWCHAIN: |
| 2733 | err = tc_chain_tmplt_add(chain, net, tca, extack); |
| 2734 | if (err) { |
| 2735 | tcf_chain_put_explicitly_created(chain); |
| 2736 | goto errout; |
| 2737 | } |
| 2738 | |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2739 | tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, |
| 2740 | RTM_NEWCHAIN, false); |
| 2741 | break; |
| 2742 | case RTM_DELCHAIN: |
Cong Wang | f5b9bac | 2018-09-11 14:22:23 -0700 | [diff] [blame] | 2743 | tfilter_notify_chain(net, skb, block, q, parent, n, |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2744 | chain, RTM_DELTFILTER, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2745 | /* Flush the chain first as the user requested chain removal. */ |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2746 | tcf_chain_flush(chain, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2747 | /* In case the chain was successfully deleted, put a reference |
| 2748 | * to the chain previously taken during addition. |
| 2749 | */ |
| 2750 | tcf_chain_put_explicitly_created(chain); |
| 2751 | break; |
| 2752 | case RTM_GETCHAIN: |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2753 | err = tc_chain_notify(chain, skb, n->nlmsg_seq, |
| 2754 | n->nlmsg_seq, n->nlmsg_type, true); |
| 2755 | if (err < 0) |
| 2756 | NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); |
| 2757 | break; |
| 2758 | default: |
| 2759 | err = -EOPNOTSUPP; |
| 2760 | NL_SET_ERR_MSG(extack, "Unsupported message type"); |
| 2761 | goto errout; |
| 2762 | } |
| 2763 | |
| 2764 | errout: |
| 2765 | tcf_chain_put(chain); |
Vlad Buslov | e368fdb | 2018-09-24 19:22:53 +0300 | [diff] [blame] | 2766 | errout_block: |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2767 | tcf_block_release(q, block, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2768 | if (err == -EAGAIN) |
| 2769 | /* Replay the request. */ |
| 2770 | goto replay; |
| 2771 | return err; |
Vlad Buslov | 2cbfab0 | 2019-02-11 10:55:34 +0200 | [diff] [blame] | 2772 | |
| 2773 | errout_block_locked: |
| 2774 | mutex_unlock(&block->lock); |
| 2775 | goto errout_block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2776 | } |
| 2777 | |
| 2778 | /* called with RTNL */ |
| 2779 | static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) |
| 2780 | { |
| 2781 | struct net *net = sock_net(skb->sk); |
| 2782 | struct nlattr *tca[TCA_MAX + 1]; |
| 2783 | struct Qdisc *q = NULL; |
| 2784 | struct tcf_block *block; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2785 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2786 | struct tcf_chain *chain; |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2787 | long index_start; |
| 2788 | long index; |
| 2789 | u32 parent; |
| 2790 | int err; |
| 2791 | |
| 2792 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
| 2793 | return skb->len; |
| 2794 | |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 2795 | err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, |
| 2796 | rtm_tca_policy, cb->extack); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2797 | if (err) |
| 2798 | return err; |
| 2799 | |
| 2800 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2801 | block = tcf_block_refcnt_get(net, tcm->tcm_block_index); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2802 | if (!block) |
| 2803 | goto out; |
| 2804 | /* If we work with block index, q is NULL and parent value |
| 2805 | * will never be used in the following code. The check |
 | 2806 | * in tcf_fill_node prevents it. However, the compiler does not |
| 2807 | * see that far, so set parent to zero to silence the warning |
| 2808 | * about parent being uninitialized. |
| 2809 | */ |
| 2810 | parent = 0; |
| 2811 | } else { |
| 2812 | const struct Qdisc_class_ops *cops; |
| 2813 | struct net_device *dev; |
| 2814 | unsigned long cl = 0; |
| 2815 | |
| 2816 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
| 2817 | if (!dev) |
| 2818 | return skb->len; |
| 2819 | |
| 2820 | parent = tcm->tcm_parent; |
| 2821 | if (!parent) { |
| 2822 | q = dev->qdisc; |
| 2823 | parent = q->handle; |
| 2824 | } else { |
| 2825 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
| 2826 | } |
| 2827 | if (!q) |
| 2828 | goto out; |
| 2829 | cops = q->ops->cl_ops; |
| 2830 | if (!cops) |
| 2831 | goto out; |
| 2832 | if (!cops->tcf_block) |
| 2833 | goto out; |
| 2834 | if (TC_H_MIN(tcm->tcm_parent)) { |
| 2835 | cl = cops->find(q, tcm->tcm_parent); |
| 2836 | if (cl == 0) |
| 2837 | goto out; |
| 2838 | } |
| 2839 | block = cops->tcf_block(q, cl, NULL); |
| 2840 | if (!block) |
| 2841 | goto out; |
| 2842 | if (tcf_block_shared(block)) |
| 2843 | q = NULL; |
| 2844 | } |
| 2845 | |
| 2846 | index_start = cb->args[0]; |
| 2847 | index = 0; |
| 2848 | |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2849 | mutex_lock(&block->lock); |
| 2850 | list_for_each_entry(chain, &block->chain_list, list) { |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2851 | if ((tca[TCA_CHAIN] && |
| 2852 | nla_get_u32(tca[TCA_CHAIN]) != chain->index)) |
| 2853 | continue; |
| 2854 | if (index < index_start) { |
| 2855 | index++; |
| 2856 | continue; |
| 2857 | } |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2858 | if (tcf_chain_held_by_acts_only(chain)) |
| 2859 | continue; |
Vlad Buslov | a565482 | 2019-02-11 10:55:37 +0200 | [diff] [blame] | 2860 | err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, |
| 2861 | chain->index, net, skb, block, |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2862 | NETLINK_CB(cb->skb).portid, |
| 2863 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 2864 | RTM_NEWCHAIN); |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2865 | if (err <= 0) |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2866 | break; |
| 2867 | index++; |
| 2868 | } |
Vlad Buslov | ace4a26 | 2019-02-25 17:45:44 +0200 | [diff] [blame] | 2869 | mutex_unlock(&block->lock); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2870 | |
Vlad Buslov | 787ce6d | 2018-09-24 19:22:58 +0300 | [diff] [blame] | 2871 | if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) |
Vlad Buslov | 12db03b | 2019-02-11 10:55:45 +0200 | [diff] [blame] | 2872 | tcf_block_refcnt_put(block, true); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 2873 | cb->args[0] = index; |
| 2874 | |
| 2875 | out: |
| 2876 | /* If we did no progress, the error (EMSGSIZE) is real */ |
| 2877 | if (skb->len == 0 && err) |
| 2878 | return err; |
| 2879 | return skb->len; |
| 2880 | } |
| 2881 | |
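/* Release all actions bound to @exts. */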
WANG Cong | 18d0264 | 2014-09-25 10:26:37 -0700 | [diff] [blame] | 2882 | void tcf_exts_destroy(struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2883 | { |
| 2884 | #ifdef CONFIG_NET_CLS_ACT |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 2885 | tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2886 | kfree(exts->actions); |
| 2887 | exts->nr_actions = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2888 | #endif |
| 2889 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2890 | EXPORT_SYMBOL(tcf_exts_destroy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2891 | |
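/* Parse action attributes from netlink into @exts, handling both the old
 * single police attribute and the newer action list format.
 */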
Benjamin LaHaise | c1b5273 | 2013-01-14 05:15:39 +0000 | [diff] [blame] | 2892 | int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 2893 | struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 2894 | bool rtnl_held, struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2895 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2896 | #ifdef CONFIG_NET_CLS_ACT |
| 2897 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2898 | struct tc_action *act; |
Roman Mashak | d04e699 | 2018-03-08 16:59:17 -0500 | [diff] [blame] | 2899 | size_t attr_size = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2900 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 2901 | if (exts->police && tb[exts->police]) { |
Jiri Pirko | 9fb9f25 | 2017-05-17 11:08:02 +0200 | [diff] [blame] | 2902 | act = tcf_action_init_1(net, tp, tb[exts->police], |
| 2903 | rate_tlv, "police", ovr, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 2904 | TCA_ACT_BIND, rtnl_held, |
| 2905 | extack); |
Patrick McHardy | ab27cfb | 2008-01-23 20:33:13 -0800 | [diff] [blame] | 2906 | if (IS_ERR(act)) |
| 2907 | return PTR_ERR(act); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2908 | |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 2909 | act->type = exts->type = TCA_OLD_COMPAT; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2910 | exts->actions[0] = act; |
| 2911 | exts->nr_actions = 1; |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 2912 | } else if (exts->action && tb[exts->action]) { |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 2913 | int err; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2914 | |
Jiri Pirko | 9fb9f25 | 2017-05-17 11:08:02 +0200 | [diff] [blame] | 2915 | err = tcf_action_init(net, tp, tb[exts->action], |
| 2916 | rate_tlv, NULL, ovr, TCA_ACT_BIND, |
Vlad Buslov | ec6743a | 2019-02-11 10:55:43 +0200 | [diff] [blame] | 2917 | exts->actions, &attr_size, |
| 2918 | rtnl_held, extack); |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 2919 | if (err < 0) |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 2920 | return err; |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 2921 | exts->nr_actions = err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2922 | } |
| 2923 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2924 | #else |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 2925 | if ((exts->action && tb[exts->action]) || |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 2926 | (exts->police && tb[exts->police])) { |
| 2927 | NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2928 | return -EOPNOTSUPP; |
Alexander Aring | 50a5619 | 2018-01-18 11:20:52 -0500 | [diff] [blame] | 2929 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | #endif |
| 2931 | |
| 2932 | return 0; |
| 2933 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2934 | EXPORT_SYMBOL(tcf_exts_validate); |
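/* Illustrative sketch, not part of the original file: a classifier's
 * ->change() path typically parses its attributes and then hands the
 * action/police attributes to tcf_exts_validate(). The call pattern
 * below follows in-tree users such as cls_flower:
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr,
 *				rtnl_held, extack);
 *	if (err < 0)
 *		return err;
 *
 * On success the parsed actions are bound into f->exts; any later error
 * path must release them with tcf_exts_destroy().
 */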
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2935 | |
Jiri Pirko | 9b0d444 | 2017-08-04 14:29:15 +0200 | [diff] [blame] | 2936 | void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2937 | { |
| 2938 | #ifdef CONFIG_NET_CLS_ACT |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2939 | struct tcf_exts old = *dst; |
| 2940 | |
Jiri Pirko | 9b0d444 | 2017-08-04 14:29:15 +0200 | [diff] [blame] | 2941 | *dst = *src; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2942 | tcf_exts_destroy(&old); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2943 | #endif |
| 2944 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2945 | EXPORT_SYMBOL(tcf_exts_change); |
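/* Illustrative sketch, not part of the original file: tcf_exts_change()
 * is the commit step of a validate-then-swap pattern. A classifier
 * validates actions into a temporary tcf_exts and only swaps it into
 * the live filter once nothing else can fail (TCA_FOO_* and f are
 * hypothetical):
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&f->exts, &e);
 *
 * After the swap the filter's old actions have been released and "e"
 * must not be destroyed again.
 */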
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2946 | |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2947 | #ifdef CONFIG_NET_CLS_ACT |
| 2948 | static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) |
| 2949 | { |
| 2950 | if (exts->nr_actions == 0) |
| 2951 | return NULL; |
| 2952 | else |
| 2953 | return exts->actions[0]; |
| 2954 | } |
| 2955 | #endif |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 2956 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 2957 | int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2958 | { |
| 2959 | #ifdef CONFIG_NET_CLS_ACT |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 2960 | struct nlattr *nest; |
| 2961 | |
Jiri Pirko | 978dfd8 | 2017-08-04 14:29:03 +0200 | [diff] [blame] | 2962 | if (exts->action && tcf_exts_has_actions(exts)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2963 | /*
 | 2964 | * Again handle the backward-compatible case - we want
 | 2965 | * to accept both the old and the new way of passing
 | 2966 | * tc action data, even if iproute2 is newer - jhs
 | 2967 | */
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 2968 | if (exts->type != TCA_OLD_COMPAT) { |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 2969 | nest = nla_nest_start_noflag(skb, exts->action); |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 2970 | if (nest == NULL) |
| 2971 | goto nla_put_failure; |
WANG Cong | 22dc13c | 2016-08-13 22:35:00 -0700 | [diff] [blame] | 2972 | |
Vlad Buslov | 90b73b7 | 2018-07-05 17:24:33 +0300 | [diff] [blame] | 2973 | if (tcf_action_dump(skb, exts->actions, 0, 0) < 0) |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 2974 | goto nla_put_failure; |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 2975 | nla_nest_end(skb, nest); |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 2976 | } else if (exts->police) { |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 2977 | struct tc_action *act = tcf_exts_first_act(exts); |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 2978 | nest = nla_nest_start_noflag(skb, exts->police); |
Jamal Hadi Salim | 63acd68 | 2013-12-23 08:02:12 -0500 | [diff] [blame] | 2979 | if (nest == NULL || !act) |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 2980 | goto nla_put_failure; |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 2981 | if (tcf_action_dump_old(skb, act, 0, 0) < 0) |
Patrick McHardy | add93b6 | 2008-01-22 22:11:33 -0800 | [diff] [blame] | 2982 | goto nla_put_failure; |
Patrick McHardy | 4b3550ef | 2008-01-23 20:34:11 -0800 | [diff] [blame] | 2983 | nla_nest_end(skb, nest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2984 | } |
| 2985 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2986 | return 0; |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 2987 | |
| 2988 | nla_put_failure: |
| 2989 | nla_nest_cancel(skb, nest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2990 | return -1; |
Cong Wang | 9cc63db | 2014-07-16 14:25:30 -0700 | [diff] [blame] | 2991 | #else |
| 2992 | return 0; |
| 2993 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2994 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2995 | EXPORT_SYMBOL(tcf_exts_dump); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 | |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 2997 | |
WANG Cong | 5da57f4 | 2013-12-15 20:15:07 -0800 | [diff] [blame] | 2998 | int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2999 | { |
| 3000 | #ifdef CONFIG_NET_CLS_ACT |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3001 | struct tc_action *a = tcf_exts_first_act(exts); |
Ignacy Gawędzki | b057df2 | 2015-02-03 19:05:18 +0100 | [diff] [blame] | 3002 | if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) |
WANG Cong | 33be627 | 2013-12-15 20:15:05 -0800 | [diff] [blame] | 3003 | return -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3004 | #endif |
| 3005 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3006 | } |
Stephen Hemminger | aa767bf | 2008-01-21 02:26:41 -0800 | [diff] [blame] | 3007 | EXPORT_SYMBOL(tcf_exts_dump_stats); |
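/* Illustrative sketch, not part of the original file: in a classifier's
 * ->dump() hook the two helpers above are used back to back - first the
 * action attributes, then the per-action statistics:
 *
 *	if (tcf_exts_dump(skb, &f->exts) < 0 ||
 *	    tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */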
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3008 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3009 | static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) |
| 3010 | { |
| 3011 | if (*flags & TCA_CLS_FLAGS_IN_HW) |
| 3012 | return; |
| 3013 | *flags |= TCA_CLS_FLAGS_IN_HW; |
| 3014 | atomic_inc(&block->offloadcnt); |
| 3015 | } |
| 3016 | |
| 3017 | static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) |
| 3018 | { |
| 3019 | if (!(*flags & TCA_CLS_FLAGS_IN_HW)) |
| 3020 | return; |
| 3021 | *flags &= ~TCA_CLS_FLAGS_IN_HW; |
| 3022 | atomic_dec(&block->offloadcnt); |
| 3023 | } |
| 3024 | |
| 3025 | static void tc_cls_offload_cnt_update(struct tcf_block *block, |
| 3026 | struct tcf_proto *tp, u32 *cnt, |
| 3027 | u32 *flags, u32 diff, bool add) |
| 3028 | { |
| 3029 | lockdep_assert_held(&block->cb_lock); |
| 3030 | |
| 3031 | spin_lock(&tp->lock); |
| 3032 | if (add) { |
| 3033 | if (!*cnt) |
| 3034 | tcf_block_offload_inc(block, flags); |
| 3035 | *cnt += diff; |
| 3036 | } else { |
| 3037 | *cnt -= diff; |
| 3038 | if (!*cnt) |
| 3039 | tcf_block_offload_dec(block, flags); |
| 3040 | } |
| 3041 | spin_unlock(&tp->lock); |
| 3042 | } |
| 3043 | |
| 3044 | static void |
| 3045 | tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, |
| 3046 | u32 *cnt, u32 *flags) |
| 3047 | { |
| 3048 | lockdep_assert_held(&block->cb_lock); |
| 3049 | |
| 3050 | spin_lock(&tp->lock); |
| 3051 | tcf_block_offload_dec(block, flags); |
| 3052 | *cnt = 0; |
| 3053 | spin_unlock(&tp->lock); |
| 3054 | } |
| 3055 | |
| 3056 | static int |
| 3057 | __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, |
| 3058 | void *type_data, bool err_stop) |
Jiri Pirko | 717503b | 2017-10-11 09:41:09 +0200 | [diff] [blame] | 3059 | { |
Pablo Neira Ayuso | 955bcb6 | 2019-07-09 22:55:46 +0200 | [diff] [blame] | 3060 | struct flow_block_cb *block_cb; |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3061 | int ok_count = 0; |
| 3062 | int err; |
| 3063 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3064 | list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { |
| 3065 | err = block_cb->cb(type, type_data, block_cb->cb_priv); |
| 3066 | if (err) { |
| 3067 | if (err_stop) |
| 3068 | return err; |
| 3069 | } else { |
| 3070 | ok_count++; |
| 3071 | } |
| 3072 | } |
| 3073 | return ok_count; |
| 3074 | } |
| 3075 | |
| 3076 | int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, |
| 3077 | void *type_data, bool err_stop, bool rtnl_held) |
| 3078 | { |
| 3079 | int ok_count; |
| 3080 | |
| 3081 | down_read(&block->cb_lock); |
| 3082 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
| 3083 | up_read(&block->cb_lock); |
| 3084 | return ok_count; |
| 3085 | } |
| 3086 | EXPORT_SYMBOL(tc_setup_cb_call); |
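/* Illustrative sketch, not part of the original file: a stats query is a
 * typical user of plain tc_setup_cb_call(), since it neither installs nor
 * removes an offloaded rule and so never touches the offload counters.
 * Roughly the cls_flower pattern, with setup of the other fields elided:
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	cls_flower.command = FLOW_CLS_STATS;
 *	cls_flower.cookie = (unsigned long)f;
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower,
 *			 false, rtnl_held);
 */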
| 3087 | |
 | 3088 | /* Non-destructive filter add. If a filter that wasn't already in hardware is
 | 3089 | * successfully offloaded, increment the block's offload counter. On failure, a
 | 3090 | * previously offloaded filter is considered to be intact and the offload
 | 3091 | * counter is not decremented.
 | 3092 | */
| 3093 | |
| 3094 | int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, |
| 3095 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3096 | u32 *flags, unsigned int *in_hw_count, bool rtnl_held) |
| 3097 | { |
| 3098 | int ok_count; |
| 3099 | |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3100 | down_read(&block->cb_lock); |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3101 | /* Make sure all netdevs sharing this block are offload-capable. */ |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3102 | if (block->nooffloaddevcnt && err_stop) { |
| 3103 | ok_count = -EOPNOTSUPP; |
| 3104 | goto err_unlock; |
| 3105 | } |
Cong Wang | aeb3fec | 2018-12-11 11:15:46 -0800 | [diff] [blame] | 3106 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3107 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3108 | if (ok_count < 0) |
| 3109 | goto err_unlock; |
| 3110 | |
| 3111 | if (tp->ops->hw_add) |
| 3112 | tp->ops->hw_add(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3113 | if (ok_count > 0) |
| 3114 | tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, |
| 3115 | ok_count, true); |
Vlad Buslov | 4f8116c | 2019-08-26 16:44:57 +0300 | [diff] [blame] | 3116 | err_unlock: |
| 3117 | up_read(&block->cb_lock); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3118 | return ok_count < 0 ? ok_count : 0; |
Jiri Pirko | 717503b | 2017-10-11 09:41:09 +0200 | [diff] [blame] | 3119 | } |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3120 | EXPORT_SYMBOL(tc_setup_cb_add); |
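/* Illustrative sketch, not part of the original file: a hardware offload
 * path such as cls_flower's replace hook drives tc_setup_cb_add() roughly
 * like this; a failure is fatal only when the filter demanded skip_sw:
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err)
 *		goto errout;
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;
 */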
| 3121 | |
 | 3122 | /* Destructive filter replace. If a filter that wasn't already in hardware is
 | 3123 | * successfully offloaded, increment the block's offload counter. On failure,
 | 3124 | * the previously offloaded filter is considered to be destroyed and the
 | 3125 | * offload counter is decremented.
 | 3126 | */
| 3127 | |
| 3128 | int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, |
| 3129 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3130 | u32 *old_flags, unsigned int *old_in_hw_count, |
| 3131 | u32 *new_flags, unsigned int *new_in_hw_count, |
| 3132 | bool rtnl_held) |
| 3133 | { |
| 3134 | int ok_count; |
| 3135 | |
| 3136 | down_read(&block->cb_lock); |
| 3137 | /* Make sure all netdevs sharing this block are offload-capable. */ |
| 3138 | if (block->nooffloaddevcnt && err_stop) { |
| 3139 | ok_count = -EOPNOTSUPP; |
| 3140 | goto err_unlock; |
| 3141 | } |
| 3142 | |
| 3143 | tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3144 | if (tp->ops->hw_del) |
| 3145 | tp->ops->hw_del(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3146 | |
| 3147 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3148 | if (ok_count < 0) |
| 3149 | goto err_unlock; |
| 3150 | |
| 3151 | if (tp->ops->hw_add) |
| 3152 | tp->ops->hw_add(tp, type_data); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3153 | if (ok_count > 0) |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3154 | tc_cls_offload_cnt_update(block, tp, new_in_hw_count, |
| 3155 | new_flags, ok_count, true); |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3156 | err_unlock: |
| 3157 | up_read(&block->cb_lock); |
| 3158 | return ok_count < 0 ? ok_count : 0; |
| 3159 | } |
| 3160 | EXPORT_SYMBOL(tc_setup_cb_replace); |
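/* Illustrative sketch, not part of the original file: replace is meant for
 * the case where a new filter instance takes over an offloaded rule from an
 * old one, so both the old and the new flag/count pairs are passed in
 * (fold/fnew are hypothetical old and new filter instances):
 *
 *	err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *				  skip_sw, &fold->flags, &fold->in_hw_count,
 *				  &fnew->flags, &fnew->in_hw_count, rtnl_held);
 */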
| 3161 | |
 | 3162 | /* Destroy the filter and decrement the block's offload counter if the filter
 | 3163 | * was previously offloaded.
 | 3164 | */
| 3165 | |
| 3166 | int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, |
| 3167 | enum tc_setup_type type, void *type_data, bool err_stop, |
| 3168 | u32 *flags, unsigned int *in_hw_count, bool rtnl_held) |
| 3169 | { |
| 3170 | int ok_count; |
| 3171 | |
| 3172 | down_read(&block->cb_lock); |
| 3173 | ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); |
| 3174 | |
| 3175 | tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); |
Vlad Buslov | a449a3e | 2019-08-26 16:45:00 +0300 | [diff] [blame] | 3176 | if (tp->ops->hw_del) |
| 3177 | tp->ops->hw_del(tp, type_data); |
| 3178 | |
Vlad Buslov | 4011921 | 2019-08-26 16:44:59 +0300 | [diff] [blame] | 3179 | up_read(&block->cb_lock); |
| 3180 | return ok_count < 0 ? ok_count : 0; |
| 3181 | } |
| 3182 | EXPORT_SYMBOL(tc_setup_cb_destroy); |
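/* Illustrative sketch, not part of the original file: teardown mirrors the
 * add path; callback errors are not propagated here because the filter is
 * being destroyed regardless:
 *
 *	cls_flower.command = FLOW_CLS_DESTROY;
 *	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			    false, &f->flags, &f->in_hw_count, rtnl_held);
 */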
| 3183 | |
| 3184 | int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, |
| 3185 | bool add, flow_setup_cb_t *cb, |
| 3186 | enum tc_setup_type type, void *type_data, |
| 3187 | void *cb_priv, u32 *flags, unsigned int *in_hw_count) |
| 3188 | { |
| 3189 | int err = cb(type, type_data, cb_priv); |
| 3190 | |
| 3191 | if (err) { |
| 3192 | if (add && tc_skip_sw(*flags)) |
| 3193 | return err; |
| 3194 | } else { |
| 3195 | tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, |
| 3196 | add); |
| 3197 | } |
| 3198 | |
| 3199 | return 0; |
| 3200 | } |
| 3201 | EXPORT_SYMBOL(tc_setup_cb_reoffload); |
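/* Illustrative sketch, not part of the original file: tc_setup_cb_reoffload()
 * runs from a classifier's ->reoffload() hook when a single block callback is
 * being (un)bound, replaying each filter to exactly that callback (the loop
 * body that fills cls_flower for each filter is elided):
 *
 *	list_for_each_entry(f, &head->filters, list) {
 *		...
 *		err = tc_setup_cb_reoffload(block, tp, add, cb,
 *					    TC_SETUP_CLSFLOWER, &cls_flower,
 *					    cb_priv, &f->flags,
 *					    &f->in_hw_count);
 *		if (err)
 *			return err;
 *	}
 */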
Jiri Pirko | b3f55bd | 2017-10-11 09:41:08 +0200 | [diff] [blame] | 3202 | |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3203 | int tc_setup_flow_action(struct flow_action *flow_action, |
| 3204 | const struct tcf_exts *exts) |
| 3205 | { |
| 3206 | const struct tc_action *act; |
| 3207 | int i, j, k; |
| 3208 | |
| 3209 | if (!exts) |
| 3210 | return 0; |
| 3211 | |
| 3212 | j = 0; |
| 3213 | tcf_exts_for_each_action(i, act, exts) { |
| 3214 | struct flow_action_entry *entry; |
| 3215 | |
| 3216 | entry = &flow_action->entries[j]; |
| 3217 | if (is_tcf_gact_ok(act)) { |
| 3218 | entry->id = FLOW_ACTION_ACCEPT; |
| 3219 | } else if (is_tcf_gact_shot(act)) { |
| 3220 | entry->id = FLOW_ACTION_DROP; |
| 3221 | } else if (is_tcf_gact_trap(act)) { |
| 3222 | entry->id = FLOW_ACTION_TRAP; |
| 3223 | } else if (is_tcf_gact_goto_chain(act)) { |
| 3224 | entry->id = FLOW_ACTION_GOTO; |
| 3225 | entry->chain_index = tcf_gact_goto_chain_index(act); |
| 3226 | } else if (is_tcf_mirred_egress_redirect(act)) { |
| 3227 | entry->id = FLOW_ACTION_REDIRECT; |
| 3228 | entry->dev = tcf_mirred_dev(act); |
| 3229 | } else if (is_tcf_mirred_egress_mirror(act)) { |
| 3230 | entry->id = FLOW_ACTION_MIRRED; |
| 3231 | entry->dev = tcf_mirred_dev(act); |
John Hurley | 48e584a | 2019-08-04 16:09:06 +0100 | [diff] [blame] | 3232 | } else if (is_tcf_mirred_ingress_redirect(act)) { |
| 3233 | entry->id = FLOW_ACTION_REDIRECT_INGRESS; |
| 3234 | entry->dev = tcf_mirred_dev(act); |
| 3235 | } else if (is_tcf_mirred_ingress_mirror(act)) { |
| 3236 | entry->id = FLOW_ACTION_MIRRED_INGRESS; |
| 3237 | entry->dev = tcf_mirred_dev(act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3238 | } else if (is_tcf_vlan(act)) { |
| 3239 | switch (tcf_vlan_action(act)) { |
| 3240 | case TCA_VLAN_ACT_PUSH: |
| 3241 | entry->id = FLOW_ACTION_VLAN_PUSH; |
| 3242 | entry->vlan.vid = tcf_vlan_push_vid(act); |
| 3243 | entry->vlan.proto = tcf_vlan_push_proto(act); |
| 3244 | entry->vlan.prio = tcf_vlan_push_prio(act); |
| 3245 | break; |
| 3246 | case TCA_VLAN_ACT_POP: |
| 3247 | entry->id = FLOW_ACTION_VLAN_POP; |
| 3248 | break; |
| 3249 | case TCA_VLAN_ACT_MODIFY: |
| 3250 | entry->id = FLOW_ACTION_VLAN_MANGLE; |
| 3251 | entry->vlan.vid = tcf_vlan_push_vid(act); |
| 3252 | entry->vlan.proto = tcf_vlan_push_proto(act); |
| 3253 | entry->vlan.prio = tcf_vlan_push_prio(act); |
| 3254 | break; |
| 3255 | default: |
| 3256 | goto err_out; |
| 3257 | } |
| 3258 | } else if (is_tcf_tunnel_set(act)) { |
| 3259 | entry->id = FLOW_ACTION_TUNNEL_ENCAP; |
| 3260 | entry->tunnel = tcf_tunnel_info(act); |
| 3261 | } else if (is_tcf_tunnel_release(act)) { |
| 3262 | entry->id = FLOW_ACTION_TUNNEL_DECAP; |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3263 | } else if (is_tcf_pedit(act)) { |
| 3264 | for (k = 0; k < tcf_pedit_nkeys(act); k++) { |
| 3265 | switch (tcf_pedit_cmd(act, k)) { |
| 3266 | case TCA_PEDIT_KEY_EX_CMD_SET: |
| 3267 | entry->id = FLOW_ACTION_MANGLE; |
| 3268 | break; |
| 3269 | case TCA_PEDIT_KEY_EX_CMD_ADD: |
| 3270 | entry->id = FLOW_ACTION_ADD; |
| 3271 | break; |
| 3272 | default: |
| 3273 | goto err_out; |
| 3274 | } |
| 3275 | entry->mangle.htype = tcf_pedit_htype(act, k); |
| 3276 | entry->mangle.mask = tcf_pedit_mask(act, k); |
| 3277 | entry->mangle.val = tcf_pedit_val(act, k); |
| 3278 | entry->mangle.offset = tcf_pedit_offset(act, k); |
| 3279 | entry = &flow_action->entries[++j]; |
| 3280 | } |
| 3281 | } else if (is_tcf_csum(act)) { |
| 3282 | entry->id = FLOW_ACTION_CSUM; |
| 3283 | entry->csum_flags = tcf_csum_update_flags(act); |
| 3284 | } else if (is_tcf_skbedit_mark(act)) { |
| 3285 | entry->id = FLOW_ACTION_MARK; |
| 3286 | entry->mark = tcf_skbedit_mark(act); |
Pieter Jansen van Vuuren | a7a7be6 | 2019-05-04 04:46:16 -0700 | [diff] [blame] | 3287 | } else if (is_tcf_sample(act)) { |
| 3288 | entry->id = FLOW_ACTION_SAMPLE; |
| 3289 | entry->sample.psample_group = |
| 3290 | tcf_sample_psample_group(act); |
| 3291 | entry->sample.trunc_size = tcf_sample_trunc_size(act); |
| 3292 | entry->sample.truncate = tcf_sample_truncate(act); |
| 3293 | entry->sample.rate = tcf_sample_rate(act); |
Pieter Jansen van Vuuren | 8c8cfc6 | 2019-05-04 04:46:22 -0700 | [diff] [blame] | 3294 | } else if (is_tcf_police(act)) { |
| 3295 | entry->id = FLOW_ACTION_POLICE; |
| 3296 | entry->police.burst = tcf_police_tcfp_burst(act); |
| 3297 | entry->police.rate_bytes_ps = |
| 3298 | tcf_police_rate_bytes_ps(act); |
Paul Blakey | b57dc7c | 2019-07-09 10:30:48 +0300 | [diff] [blame] | 3299 | } else if (is_tcf_ct(act)) { |
| 3300 | entry->id = FLOW_ACTION_CT; |
| 3301 | entry->ct.action = tcf_ct_action(act); |
| 3302 | entry->ct.zone = tcf_ct_zone(act); |
John Hurley | 6749d590 | 2019-07-23 15:33:59 +0100 | [diff] [blame] | 3303 | } else if (is_tcf_mpls(act)) { |
| 3304 | switch (tcf_mpls_action(act)) { |
| 3305 | case TCA_MPLS_ACT_PUSH: |
| 3306 | entry->id = FLOW_ACTION_MPLS_PUSH; |
| 3307 | entry->mpls_push.proto = tcf_mpls_proto(act); |
| 3308 | entry->mpls_push.label = tcf_mpls_label(act); |
| 3309 | entry->mpls_push.tc = tcf_mpls_tc(act); |
| 3310 | entry->mpls_push.bos = tcf_mpls_bos(act); |
| 3311 | entry->mpls_push.ttl = tcf_mpls_ttl(act); |
| 3312 | break; |
| 3313 | case TCA_MPLS_ACT_POP: |
| 3314 | entry->id = FLOW_ACTION_MPLS_POP; |
| 3315 | entry->mpls_pop.proto = tcf_mpls_proto(act); |
| 3316 | break; |
| 3317 | case TCA_MPLS_ACT_MODIFY: |
| 3318 | entry->id = FLOW_ACTION_MPLS_MANGLE; |
| 3319 | entry->mpls_mangle.label = tcf_mpls_label(act); |
| 3320 | entry->mpls_mangle.tc = tcf_mpls_tc(act); |
| 3321 | entry->mpls_mangle.bos = tcf_mpls_bos(act); |
| 3322 | entry->mpls_mangle.ttl = tcf_mpls_ttl(act); |
| 3323 | break; |
| 3324 | default: |
| 3325 | goto err_out; |
| 3326 | } |
John Hurley | fb1b775 | 2019-08-04 16:09:04 +0100 | [diff] [blame] | 3327 | } else if (is_tcf_skbedit_ptype(act)) { |
| 3328 | entry->id = FLOW_ACTION_PTYPE; |
| 3329 | entry->ptype = tcf_skbedit_ptype(act); |
Pablo Neira Ayuso | 3a7b686 | 2019-02-02 12:50:46 +0100 | [diff] [blame] | 3330 | } else { |
| 3331 | goto err_out; |
| 3332 | } |
| 3333 | |
| 3334 | if (!is_tcf_pedit(act)) |
| 3335 | j++; |
| 3336 | } |
| 3337 | return 0; |
| 3338 | err_out: |
| 3339 | return -EOPNOTSUPP; |
| 3340 | } |
| 3341 | EXPORT_SYMBOL(tc_setup_flow_action); |
| 3342 | |
Pablo Neira Ayuso | e3ab786 | 2019-02-02 12:50:45 +0100 | [diff] [blame] | 3343 | unsigned int tcf_exts_num_actions(struct tcf_exts *exts) |
| 3344 | { |
| 3345 | unsigned int num_acts = 0; |
| 3346 | struct tc_action *act; |
| 3347 | int i; |
| 3348 | |
| 3349 | tcf_exts_for_each_action(i, act, exts) { |
| 3350 | if (is_tcf_pedit(act)) |
| 3351 | num_acts += tcf_pedit_nkeys(act); |
| 3352 | else |
| 3353 | num_acts++; |
| 3354 | } |
| 3355 | return num_acts; |
| 3356 | } |
| 3357 | EXPORT_SYMBOL(tcf_exts_num_actions); |
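/* Illustrative sketch, not part of the original file: the two helpers above
 * work as a pair when building a flow_rule for a driver - the rule is sized
 * with tcf_exts_num_actions() and filled by tc_setup_flow_action(), as
 * cls_flower does when preparing a flow_cls_offload:
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, exts);
 *	if (err) {
 *		kfree(rule);
 *		return err;
 *	}
 */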
| 3358 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3359 | static __net_init int tcf_net_init(struct net *net) |
| 3360 | { |
| 3361 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 3362 | |
Vlad Buslov | ab28162 | 2018-09-24 19:22:56 +0300 | [diff] [blame] | 3363 | spin_lock_init(&tn->idr_lock); |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3364 | idr_init(&tn->idr); |
| 3365 | return 0; |
| 3366 | } |
| 3367 | |
| 3368 | static void __net_exit tcf_net_exit(struct net *net) |
| 3369 | { |
| 3370 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 3371 | |
| 3372 | idr_destroy(&tn->idr); |
| 3373 | } |
| 3374 | |
| 3375 | static struct pernet_operations tcf_net_ops = { |
| 3376 | .init = tcf_net_init, |
| 3377 | .exit = tcf_net_exit, |
| 3378 | .id = &tcf_net_id, |
| 3379 | .size = sizeof(struct tcf_net), |
| 3380 | }; |
| 3381 | |
wenxu | 1150ab0 | 2019-08-07 09:13:53 +0800 | [diff] [blame] | 3382 | static struct flow_indr_block_ing_entry block_ing_entry = { |
| 3383 | .cb = tc_indr_block_get_and_ing_cmd, |
| 3384 | .list = LIST_HEAD_INIT(block_ing_entry.list), |
| 3385 | }; |
| 3386 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3387 | static int __init tc_filter_init(void) |
| 3388 | { |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3389 | int err; |
| 3390 | |
Cong Wang | 7aa0045 | 2017-10-26 18:24:28 -0700 | [diff] [blame] | 3391 | tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); |
| 3392 | if (!tc_filter_wq) |
| 3393 | return -ENOMEM; |
| 3394 | |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3395 | err = register_pernet_subsys(&tcf_net_ops); |
| 3396 | if (err) |
| 3397 | goto err_register_pernet_subsys; |
| 3398 | |
wenxu | 1150ab0 | 2019-08-07 09:13:53 +0800 | [diff] [blame] | 3399 | flow_indr_add_block_ing_cb(&block_ing_entry); |
| 3400 | |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 3401 | rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, |
| 3402 | RTNL_FLAG_DOIT_UNLOCKED); |
| 3403 | rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, |
| 3404 | RTNL_FLAG_DOIT_UNLOCKED); |
Vlad Buslov | c431f89 | 2018-05-31 09:52:53 +0300 | [diff] [blame] | 3405 | rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, |
Vlad Buslov | 470502d | 2019-02-11 10:55:48 +0200 | [diff] [blame] | 3406 | tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); |
Jiri Pirko | 32a4f5e | 2018-07-23 09:23:06 +0200 | [diff] [blame] | 3407 | rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); |
| 3408 | rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); |
| 3409 | rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, |
| 3410 | tc_dump_chain, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3411 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3412 | return 0; |
Jiri Pirko | 4861738 | 2018-01-17 11:46:46 +0100 | [diff] [blame] | 3413 | |
| 3414 | err_register_pernet_subsys: |
| 3415 | destroy_workqueue(tc_filter_wq); |
| 3416 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3417 | } |
| 3418 | |
| 3419 | subsys_initcall(tc_filter_init); |