Jakub Kicinski | a39e17b | 2017-11-27 12:10:23 -0800 | [diff] [blame] | 1 | /* |
Jakub Kicinski | 0cd3cbe | 2018-05-03 18:37:08 -0700 | [diff] [blame] | 2 | * Copyright (C) 2017-2018 Netronome Systems, Inc. |
Jakub Kicinski | a39e17b | 2017-11-27 12:10:23 -0800 | [diff] [blame] | 3 | * |
| 4 | * This software is licensed under the GNU General License Version 2, |
| 5 | * June 1991 as shown in the file COPYING in the top-level directory of this |
| 6 | * source tree. |
| 7 | * |
| 8 | * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" |
| 9 | * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, |
| 10 | * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 11 | * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE |
| 12 | * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME |
| 13 | * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. |
| 14 | */ |
| 15 | |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 16 | #include <linux/bpf.h> |
| 17 | #include <linux/bpf_verifier.h> |
| 18 | #include <linux/bug.h> |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 19 | #include <linux/kdev_t.h> |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 20 | #include <linux/list.h> |
Jakub Kicinski | 9fd7c55 | 2018-07-17 10:53:24 -0700 | [diff] [blame] | 21 | #include <linux/lockdep.h> |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 22 | #include <linux/netdevice.h> |
| 23 | #include <linux/printk.h> |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 24 | #include <linux/proc_ns.h> |
Jakub Kicinski | 9fd7c55 | 2018-07-17 10:53:24 -0700 | [diff] [blame] | 25 | #include <linux/rhashtable.h> |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 26 | #include <linux/rtnetlink.h> |
Jakub Kicinski | e0d3974 | 2017-12-27 18:39:03 -0800 | [diff] [blame] | 27 | #include <linux/rwsem.h> |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 28 | |
/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

/* One offload-capable device as registered by its driver via
 * bpf_offload_dev_create().  A device may expose several netdevs
 * (tracked on ->netdevs), all sharing the same offload callbacks.
 */
struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;	/* driver verifier/translate hooks */
	struct list_head netdevs;		/* bpf_offload_netdev::offdev_netdevs */
	void *priv;				/* driver-private cookie, see bpf_offload_dev_priv() */
};

/* Per-netdev offload state; entries live in the offdevs hash table,
 * keyed by the netdev pointer itself.
 */
struct bpf_offload_netdev {
	struct rhash_head l;			/* offdevs hash linkage */
	struct net_device *netdev;		/* hash key */
	struct bpf_offload_dev *offdev;		/* owning device */
	struct list_head progs;			/* bpf_prog_offload::offloads */
	struct list_head maps;			/* bpf_offloaded_map::offloads */
	struct list_head offdev_netdevs;	/* entry on offdev->netdevs */
};

/* offdevs hashes bpf_offload_netdev entries by the raw netdev pointer value */
static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
/* Set once the rhashtable is initialized (lazily, in bpf_offload_dev_create());
 * read/written under bpf_devs_lock.
 */
static bool offdevs_inited;
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 60 | |
Jakub Kicinski | 5bc2d55 | 2018-01-11 20:29:08 -0800 | [diff] [blame] | 61 | static int bpf_dev_offload_check(struct net_device *netdev) |
| 62 | { |
| 63 | if (!netdev) |
| 64 | return -EINVAL; |
| 65 | if (!netdev->netdev_ops->ndo_bpf) |
| 66 | return -EOPNOTSUPP; |
| 67 | return 0; |
| 68 | } |
| 69 | |
/* Look up the bpf_offload_netdev entry for @netdev, or NULL if the
 * netdev was never registered for offload.  Caller must hold
 * bpf_devs_lock (read or write).
 */
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	/* Table does not exist until the first bpf_offload_dev_create() */
	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}
| 79 | |
/* Bind a program being loaded to the offload netdev named by
 * attr->prog_ifindex (resolved in the caller's netns).  On success
 * prog->aux->offload is populated and the offload is linked onto the
 * netdev's program list.
 *
 * Return: 0 on success; -EINVAL for unsupported prog type/flags or a
 * netdev not registered for offload; -EOPNOTSUPP/-ENOMEM as appropriate.
 */
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	/* Only cls_bpf and XDP programs can be offloaded */
	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	/* Takes a reference on the netdev; may return NULL */
	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		/* netdev exists but was never registered for offload */
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	/* The list linkage keeps the association; drop the dev reference */
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	/* netdev may be NULL if dev_get_by_index() failed */
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}
| 126 | |
/* Invoke the driver's ->prepare() callback before verification starts.
 * offload->dev_state records whether prepare succeeded, so that
 * __bpf_prog_offload_destroy() knows whether ->destroy() must be called.
 *
 * Return: driver's result, or -ENODEV if the program lost its offload
 * binding (e.g. the netdev went away).
 */
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		/* dev_state is true only when prepare() returned 0 */
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}
| 142 | |
/* Per-instruction verifier callback: forward to the driver's
 * ->insn_hook() so it can validate instruction @insn_idx as the
 * verifier walks the program.
 *
 * Return: driver's result, or -ENODEV if the offload binding is gone.
 */
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}
| 158 | |
/* Called when verification completes; gives the driver a chance to run
 * its ->finalize() hook.  The hook is optional - absence is treated as
 * success.
 *
 * Return: driver's result, 0 if no hook, or -ENODEV if the offload
 * binding is gone.
 */
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}
| 176 | |
/* Notify the driver that the verifier replaced the instruction at @off
 * with @insn.  The ->replace_insn() hook is optional; once any
 * optimization callback fails, opt_failed is latched so no further
 * optimization hooks are attempted for this program.
 */
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		/* any non-zero result permanently disables optimizations */
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}
| 195 | |
| 196 | void |
| 197 | bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) |
| 198 | { |
| 199 | struct bpf_prog_offload *offload; |
| 200 | int ret = -EOPNOTSUPP; |
| 201 | |
| 202 | down_read(&bpf_devs_lock); |
| 203 | offload = env->prog->aux->offload; |
| 204 | if (offload) { |
| 205 | if (!offload->opt_failed && offload->offdev->ops->remove_insns) |
| 206 | ret = offload->offdev->ops->remove_insns(env, off, cnt); |
| 207 | offload->opt_failed |= ret; |
| 208 | } |
| 209 | up_read(&bpf_devs_lock); |
| 210 | } |
| 211 | |
/* Tear down a program's offload state.  Callers hold bpf_devs_lock for
 * writing (bpf_prog_offload_destroy() and netdev unregister).
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	/* Only call the driver if ->prepare() previously succeeded */
	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}
| 226 | |
| 227 | void bpf_prog_offload_destroy(struct bpf_prog *prog) |
| 228 | { |
Jakub Kicinski | e0d3974 | 2017-12-27 18:39:03 -0800 | [diff] [blame] | 229 | down_write(&bpf_devs_lock); |
Jakub Kicinski | ce3b9db | 2017-12-27 18:39:06 -0800 | [diff] [blame] | 230 | if (prog->aux->offload) |
| 231 | __bpf_prog_offload_destroy(prog); |
Jakub Kicinski | e0d3974 | 2017-12-27 18:39:03 -0800 | [diff] [blame] | 232 | up_write(&bpf_devs_lock); |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 233 | } |
| 234 | |
| 235 | static int bpf_prog_offload_translate(struct bpf_prog *prog) |
| 236 | { |
Quentin Monnet | b07ade2 | 2018-11-09 13:03:29 +0000 | [diff] [blame] | 237 | struct bpf_prog_offload *offload; |
| 238 | int ret = -ENODEV; |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 239 | |
Quentin Monnet | b07ade2 | 2018-11-09 13:03:29 +0000 | [diff] [blame] | 240 | down_read(&bpf_devs_lock); |
| 241 | offload = prog->aux->offload; |
| 242 | if (offload) |
Quentin Monnet | 16a8cb5c | 2018-11-09 13:03:32 +0000 | [diff] [blame] | 243 | ret = offload->offdev->ops->translate(prog); |
Quentin Monnet | b07ade2 | 2018-11-09 13:03:29 +0000 | [diff] [blame] | 244 | up_read(&bpf_devs_lock); |
Jakub Kicinski | ab3f006 | 2017-11-03 13:56:17 -0700 | [diff] [blame] | 245 | |
| 246 | return ret; |
| 247 | } |
| 248 | |
| 249 | static unsigned int bpf_prog_warn_on_exec(const void *ctx, |
| 250 | const struct bpf_insn *insn) |
| 251 | { |
| 252 | WARN(1, "attempt to execute device eBPF program on the host!"); |
| 253 | return 0; |
| 254 | } |
| 255 | |
| 256 | int bpf_prog_offload_compile(struct bpf_prog *prog) |
| 257 | { |
| 258 | prog->bpf_func = bpf_prog_warn_on_exec; |
| 259 | |
| 260 | return bpf_prog_offload_translate(prog); |
| 261 | } |
| 262 | |
/* Arguments threaded through ns_get_path_cb() into the fill callback */
struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

/* ns_get_path_cb() callback: under RTNL + bpf_devs_lock, record the
 * bound netdev's ifindex in the info struct and return its netns (with
 * a reference taken via get_net()).  Returns NULL and zeroes ifindex if
 * the program is no longer bound to a device.
 */
static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}
| 293 | |
| 294 | int bpf_prog_offload_info_fill(struct bpf_prog_info *info, |
| 295 | struct bpf_prog *prog) |
| 296 | { |
| 297 | struct ns_get_path_bpf_prog_args args = { |
| 298 | .prog = prog, |
| 299 | .info = info, |
| 300 | }; |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 301 | struct bpf_prog_aux *aux = prog->aux; |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 302 | struct inode *ns_inode; |
| 303 | struct path ns_path; |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 304 | char __user *uinsns; |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 305 | int res; |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 306 | u32 ulen; |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 307 | |
| 308 | res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args); |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 309 | if (res) { |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 310 | if (!info->ifindex) |
| 311 | return -ENODEV; |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 312 | return res; |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 313 | } |
| 314 | |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 315 | down_read(&bpf_devs_lock); |
| 316 | |
| 317 | if (!aux->offload) { |
| 318 | up_read(&bpf_devs_lock); |
| 319 | return -ENODEV; |
| 320 | } |
| 321 | |
| 322 | ulen = info->jited_prog_len; |
| 323 | info->jited_prog_len = aux->offload->jited_len; |
Johannes Krude | e20d3a0 | 2020-02-12 20:32:27 +0100 | [diff] [blame] | 324 | if (info->jited_prog_len && ulen) { |
Jiong Wang | fcfb126 | 2018-01-16 16:05:19 -0800 | [diff] [blame] | 325 | uinsns = u64_to_user_ptr(info->jited_prog_insns); |
| 326 | ulen = min_t(u32, info->jited_prog_len, ulen); |
| 327 | if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) { |
| 328 | up_read(&bpf_devs_lock); |
| 329 | return -EFAULT; |
| 330 | } |
| 331 | } |
| 332 | |
| 333 | up_read(&bpf_devs_lock); |
| 334 | |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 335 | ns_inode = ns_path.dentry->d_inode; |
| 336 | info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev); |
| 337 | info->netns_ino = ns_inode->i_ino; |
| 338 | path_put(&ns_path); |
| 339 | |
| 340 | return 0; |
| 341 | } |
| 342 | |
/* Offloaded programs have no host-side runtime ops */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
| 345 | |
/* Issue a map-related ndo_bpf command (@cmd) to the map's netdev.
 * Must run under RTNL; the caller guarantees offmap->netdev is valid.
 */
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}
| 361 | |
| 362 | struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) |
| 363 | { |
| 364 | struct net *net = current->nsproxy->net_ns; |
Jakub Kicinski | 9fd7c55 | 2018-07-17 10:53:24 -0700 | [diff] [blame] | 365 | struct bpf_offload_netdev *ondev; |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 366 | struct bpf_offloaded_map *offmap; |
| 367 | int err; |
| 368 | |
| 369 | if (!capable(CAP_SYS_ADMIN)) |
| 370 | return ERR_PTR(-EPERM); |
Jakub Kicinski | 7a0ef69 | 2018-01-17 19:13:27 -0800 | [diff] [blame] | 371 | if (attr->map_type != BPF_MAP_TYPE_ARRAY && |
| 372 | attr->map_type != BPF_MAP_TYPE_HASH) |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 373 | return ERR_PTR(-EINVAL); |
| 374 | |
| 375 | offmap = kzalloc(sizeof(*offmap), GFP_USER); |
| 376 | if (!offmap) |
| 377 | return ERR_PTR(-ENOMEM); |
| 378 | |
| 379 | bpf_map_init_from_attr(&offmap->map, attr); |
| 380 | |
| 381 | rtnl_lock(); |
| 382 | down_write(&bpf_devs_lock); |
| 383 | offmap->netdev = __dev_get_by_index(net, attr->map_ifindex); |
| 384 | err = bpf_dev_offload_check(offmap->netdev); |
| 385 | if (err) |
| 386 | goto err_unlock; |
| 387 | |
Jakub Kicinski | 9fd7c55 | 2018-07-17 10:53:24 -0700 | [diff] [blame] | 388 | ondev = bpf_offload_find_netdev(offmap->netdev); |
| 389 | if (!ondev) { |
| 390 | err = -EINVAL; |
| 391 | goto err_unlock; |
| 392 | } |
| 393 | |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 394 | err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC); |
| 395 | if (err) |
| 396 | goto err_unlock; |
| 397 | |
Jakub Kicinski | 9fd7c55 | 2018-07-17 10:53:24 -0700 | [diff] [blame] | 398 | list_add_tail(&offmap->offloads, &ondev->maps); |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 399 | up_write(&bpf_devs_lock); |
| 400 | rtnl_unlock(); |
| 401 | |
| 402 | return &offmap->map; |
| 403 | |
| 404 | err_unlock: |
| 405 | up_write(&bpf_devs_lock); |
| 406 | rtnl_unlock(); |
| 407 | kfree(offmap); |
| 408 | return ERR_PTR(err); |
| 409 | } |
| 410 | |
| 411 | static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) |
| 412 | { |
| 413 | WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); |
| 414 | /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */ |
| 415 | bpf_map_free_id(&offmap->map, true); |
| 416 | list_del_init(&offmap->offloads); |
| 417 | offmap->netdev = NULL; |
| 418 | } |
| 419 | |
| 420 | void bpf_map_offload_map_free(struct bpf_map *map) |
| 421 | { |
| 422 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
| 423 | |
| 424 | rtnl_lock(); |
| 425 | down_write(&bpf_devs_lock); |
| 426 | if (offmap->netdev) |
| 427 | __bpf_map_offload_destroy(offmap); |
| 428 | up_write(&bpf_devs_lock); |
| 429 | rtnl_unlock(); |
| 430 | |
| 431 | kfree(offmap); |
| 432 | } |
| 433 | |
| 434 | int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value) |
| 435 | { |
| 436 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
| 437 | int ret = -ENODEV; |
| 438 | |
| 439 | down_read(&bpf_devs_lock); |
| 440 | if (offmap->netdev) |
| 441 | ret = offmap->dev_ops->map_lookup_elem(offmap, key, value); |
| 442 | up_read(&bpf_devs_lock); |
| 443 | |
| 444 | return ret; |
| 445 | } |
| 446 | |
| 447 | int bpf_map_offload_update_elem(struct bpf_map *map, |
| 448 | void *key, void *value, u64 flags) |
| 449 | { |
| 450 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
| 451 | int ret = -ENODEV; |
| 452 | |
| 453 | if (unlikely(flags > BPF_EXIST)) |
| 454 | return -EINVAL; |
| 455 | |
| 456 | down_read(&bpf_devs_lock); |
| 457 | if (offmap->netdev) |
| 458 | ret = offmap->dev_ops->map_update_elem(offmap, key, value, |
| 459 | flags); |
| 460 | up_read(&bpf_devs_lock); |
| 461 | |
| 462 | return ret; |
| 463 | } |
| 464 | |
| 465 | int bpf_map_offload_delete_elem(struct bpf_map *map, void *key) |
| 466 | { |
| 467 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
| 468 | int ret = -ENODEV; |
| 469 | |
| 470 | down_read(&bpf_devs_lock); |
| 471 | if (offmap->netdev) |
| 472 | ret = offmap->dev_ops->map_delete_elem(offmap, key); |
| 473 | up_read(&bpf_devs_lock); |
| 474 | |
| 475 | return ret; |
| 476 | } |
| 477 | |
| 478 | int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key) |
| 479 | { |
| 480 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
| 481 | int ret = -ENODEV; |
| 482 | |
| 483 | down_read(&bpf_devs_lock); |
| 484 | if (offmap->netdev) |
| 485 | ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key); |
| 486 | up_read(&bpf_devs_lock); |
| 487 | |
| 488 | return ret; |
| 489 | } |
| 490 | |
/* Arguments threaded through ns_get_path_cb() into the fill callback */
struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

/* ns_get_path_cb() callback for maps; mirrors
 * bpf_prog_offload_info_fill_ns(): under RTNL + bpf_devs_lock, record
 * the bound netdev's ifindex and return its netns with a reference, or
 * NULL (and ifindex = 0) if the map is orphaned.
 */
static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}
| 520 | |
| 521 | int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map) |
| 522 | { |
| 523 | struct ns_get_path_bpf_map_args args = { |
| 524 | .offmap = map_to_offmap(map), |
| 525 | .info = info, |
| 526 | }; |
| 527 | struct inode *ns_inode; |
| 528 | struct path ns_path; |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 529 | int res; |
Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 530 | |
| 531 | res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args); |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 532 | if (res) { |
Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 533 | if (!info->ifindex) |
| 534 | return -ENODEV; |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 535 | return res; |
Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 536 | } |
| 537 | |
| 538 | ns_inode = ns_path.dentry->d_inode; |
| 539 | info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev); |
| 540 | info->netns_ino = ns_inode->i_ino; |
| 541 | path_put(&ns_path); |
| 542 | |
| 543 | return 0; |
| 544 | } |
| 545 | |
/* Decide whether @prog may be used with @netdev: true when the program
 * is bound to @netdev itself, or to another netdev belonging to the
 * same offload device.  Caller holds bpf_devs_lock.
 */
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	/* Fast path: bound to exactly this netdev */
	if (offload->netdev == netdev)
		return true;

	/* Otherwise match if both netdevs belong to the same offdev */
	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}
| 566 | |
| 567 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev) |
| 568 | { |
| 569 | bool ret; |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 570 | |
| 571 | down_read(&bpf_devs_lock); |
Jakub Kicinski | fd4f227 | 2018-07-17 10:53:26 -0700 | [diff] [blame] | 572 | ret = __bpf_offload_dev_match(prog, netdev); |
| 573 | up_read(&bpf_devs_lock); |
| 574 | |
| 575 | return ret; |
| 576 | } |
| 577 | EXPORT_SYMBOL_GPL(bpf_offload_dev_match); |
| 578 | |
| 579 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) |
| 580 | { |
| 581 | struct bpf_offloaded_map *offmap; |
| 582 | bool ret; |
| 583 | |
| 584 | if (!bpf_map_is_dev_bound(map)) |
| 585 | return bpf_map_offload_neutral(map); |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 586 | offmap = map_to_offmap(map); |
| 587 | |
Jakub Kicinski | fd4f227 | 2018-07-17 10:53:26 -0700 | [diff] [blame] | 588 | down_read(&bpf_devs_lock); |
| 589 | ret = __bpf_offload_dev_match(prog, offmap->netdev); |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 590 | up_read(&bpf_devs_lock); |
| 591 | |
| 592 | return ret; |
| 593 | } |
| 594 | |
/* Register @netdev as a port of offload device @offdev: allocate its
 * bpf_offload_netdev, insert it into the offdevs hash and link it onto
 * the device's netdev list.
 *
 * Return: 0 on success, -ENOMEM, or the rhashtable insertion error.
 */
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 627 | |
/* Unregister @netdev from @offdev.  Programs and maps bound to it are
 * migrated to another netdev of the same offload device when one
 * exists; otherwise their offload state is destroyed.  Must run under
 * RTNL (for the ndo_bpf calls issued during map destruction).
 */
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		/* Re-point every prog and map at the surviving netdev */
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		/* No sibling netdev: tear everything down */
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
Jakub Kicinski | 602144c | 2018-07-17 10:53:25 -0700 | [diff] [blame] | 671 | |
/* Create an offload device handle for a driver, storing its callback
 * table @ops and private cookie @priv.  Lazily initializes the global
 * offdevs hash table on first use.
 *
 * Return: the new device, or ERR_PTR(-ENOMEM) / rhashtable init error.
 */
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
| 700 | |
| 701 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev) |
| 702 | { |
| 703 | WARN_ON(!list_empty(&offdev->netdevs)); |
| 704 | kfree(offdev); |
| 705 | } |
| 706 | EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy); |
Jakub Kicinski | dd27c2e | 2019-02-12 00:20:39 -0800 | [diff] [blame] | 707 | |
| 708 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev) |
| 709 | { |
| 710 | return offdev->priv; |
| 711 | } |
| 712 | EXPORT_SYMBOL_GPL(bpf_offload_dev_priv); |