/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
static LIST_HEAD(bpf_map_offload_devs);
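
/* Lock ordering sketch: paths that need both RTNL and bpf_devs_lock must
 * take RTNL first, as bpf_map_offload_map_alloc() below does:
 *
 *        rtnl_lock();
 *        down_write(&bpf_devs_lock);
 *        ...
 *        up_write(&bpf_devs_lock);
 *        rtnl_unlock();
 *
 * Taking RTNL while already holding bpf_devs_lock would invert this order
 * and risk deadlock, hence the rule in the comment above.
 */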

static int bpf_dev_offload_check(struct net_device *netdev)
{
        if (!netdev)
                return -EINVAL;
        if (!netdev->netdev_ops->ndo_bpf)
                return -EOPNOTSUPP;
        return 0;
}
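
/* Illustrative sketch only (hypothetical "foo" driver, not part of this
 * file): a netdev passes bpf_dev_offload_check() by implementing ndo_bpf
 * and dispatching on the command, roughly:
 *
 *        static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *        {
 *                switch (bpf->command) {
 *                case BPF_OFFLOAD_VERIFIER_PREP:
 *                        bpf->verifier.ops = &foo_bpf_verifier_ops;
 *                        return 0;
 *                case BPF_OFFLOAD_TRANSLATE:
 *                        return foo_bpf_translate(dev, bpf->offload.prog);
 *                case BPF_OFFLOAD_DESTROY:
 *                        return foo_bpf_destroy(dev, bpf->offload.prog);
 *                case BPF_OFFLOAD_MAP_ALLOC:
 *                        return foo_bpf_map_alloc(dev, bpf->offmap);
 *                case BPF_OFFLOAD_MAP_FREE:
 *                        return foo_bpf_map_free(dev, bpf->offmap);
 *                default:
 *                        return -EINVAL;
 *                }
 *        }
 */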

int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
        struct bpf_prog_offload *offload;
        int err;

        if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_type != BPF_PROG_TYPE_XDP)
                return -EINVAL;

        if (attr->prog_flags)
                return -EINVAL;

        offload = kzalloc(sizeof(*offload), GFP_USER);
        if (!offload)
                return -ENOMEM;

        offload->prog = prog;

        offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
                                           attr->prog_ifindex);
        err = bpf_dev_offload_check(offload->netdev);
        if (err)
                goto err_maybe_put;

        down_write(&bpf_devs_lock);
        if (offload->netdev->reg_state != NETREG_REGISTERED) {
                err = -EINVAL;
                goto err_unlock;
        }
        prog->aux->offload = offload;
        list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
        dev_put(offload->netdev);
        up_write(&bpf_devs_lock);

        return 0;
err_unlock:
        up_write(&bpf_devs_lock);
err_maybe_put:
        if (offload->netdev)
                dev_put(offload->netdev);
        kfree(offload);
        return err;
}
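
/* Lifetime note: the reference taken by dev_get_by_index() above is
 * dropped as soon as the offload is on the list. The netdev notifier at
 * the bottom of this file severs the association on NETDEV_UNREGISTER,
 * so no long-lived reference to the device is needed.
 */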

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
                             struct netdev_bpf *data)
{
        struct bpf_prog_offload *offload = prog->aux->offload;
        struct net_device *netdev;

        ASSERT_RTNL();

        if (!offload)
                return -ENODEV;
        netdev = offload->netdev;

        data->command = cmd;

        return netdev->netdev_ops->ndo_bpf(netdev, data);
}

int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
        struct netdev_bpf data = {};
        int err;

        data.verifier.prog = env->prog;

        rtnl_lock();
        err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
        if (err)
                goto exit_unlock;

        env->prog->aux->offload->dev_ops = data.verifier.ops;
        env->prog->aux->offload->dev_state = true;
exit_unlock:
        rtnl_unlock();
        return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
                                 int insn_idx, int prev_insn_idx)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload)
                ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
        up_read(&bpf_devs_lock);

        return ret;
}
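
/* Hedged sketch (hypothetical driver callback, for illustration only):
 * dev_ops->insn_hook runs once per instruction as the host verifier walks
 * the program, letting the device reject constructs it cannot offload:
 *
 *        static int foo_verify_insn(struct bpf_verifier_env *env,
 *                                   int insn_idx, int prev_insn_idx)
 *        {
 *                struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
 *
 *                if (insn->code == (BPF_JMP | BPF_CALL))
 *                        return -EOPNOTSUPP;    // e.g. no helper calls
 *                return 0;
 *        }
 */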

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
        struct bpf_prog_offload *offload = prog->aux->offload;
        struct netdev_bpf data = {};

        data.offload.prog = prog;

        if (offload->dev_state)
                WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

        /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
        bpf_prog_free_id(prog, true);

        list_del_init(&offload->offloads);
        kfree(offload);
        prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (prog->aux->offload)
                __bpf_prog_offload_destroy(prog);
        up_write(&bpf_devs_lock);
        rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
        struct netdev_bpf data = {};
        int ret;

        data.offload.prog = prog;

        rtnl_lock();
        ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
        rtnl_unlock();

        return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
                                          const struct bpf_insn *insn)
{
        WARN(1, "attempt to execute device eBPF program on the host!");
        return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
        prog->bpf_func = bpf_prog_warn_on_exec;

        return bpf_prog_offload_translate(prog);
}
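
/* Note: prog->bpf_func is pointed at a guard stub above because an
 * offloaded program is translated and run by the device via
 * BPF_OFFLOAD_TRANSLATE; it must never execute on the host CPU.
 */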

struct ns_get_path_bpf_prog_args {
        struct bpf_prog *prog;
        struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
        struct ns_get_path_bpf_prog_args *args = private_data;
        struct bpf_prog_aux *aux = args->prog->aux;
        struct ns_common *ns;
        struct net *net;

        rtnl_lock();
        down_read(&bpf_devs_lock);

        if (aux->offload) {
                args->info->ifindex = aux->offload->netdev->ifindex;
                net = dev_net(aux->offload->netdev);
                get_net(net);
                ns = &net->ns;
        } else {
                args->info->ifindex = 0;
                ns = NULL;
        }

        up_read(&bpf_devs_lock);
        rtnl_unlock();

        return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                               struct bpf_prog *prog)
{
        struct ns_get_path_bpf_prog_args args = {
                .prog = prog,
                .info = info,
        };
        struct bpf_prog_aux *aux = prog->aux;
        struct inode *ns_inode;
        struct path ns_path;
        char __user *uinsns;
        void *res;
        u32 ulen;

        res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
        if (IS_ERR(res)) {
                if (!info->ifindex)
                        return -ENODEV;
                return PTR_ERR(res);
        }

        down_read(&bpf_devs_lock);

        if (!aux->offload) {
                up_read(&bpf_devs_lock);
                return -ENODEV;
        }

        ulen = info->jited_prog_len;
        info->jited_prog_len = aux->offload->jited_len;
        if (info->jited_prog_len && ulen) {
                uinsns = u64_to_user_ptr(info->jited_prog_insns);
                ulen = min_t(u32, info->jited_prog_len, ulen);
                if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
                        up_read(&bpf_devs_lock);
                        return -EFAULT;
                }
        }

        up_read(&bpf_devs_lock);

        ns_inode = ns_path.dentry->d_inode;
        info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
        info->netns_ino = ns_inode->i_ino;
        path_put(&ns_path);

        return 0;
}

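/* Note: intentionally empty ops below - an offloaded program is never
 * interpreted or test-run on the host, so there are no host callbacks
 * to provide.
 */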
const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
                               enum bpf_netdev_command cmd)
{
        struct netdev_bpf data = {};
        struct net_device *netdev;

        ASSERT_RTNL();

        data.command = cmd;
        data.offmap = offmap;
        /* Caller must make sure netdev is valid */
        netdev = offmap->netdev;

        return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
        struct net *net = current->nsproxy->net_ns;
        struct bpf_offloaded_map *offmap;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
        if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
            attr->map_type != BPF_MAP_TYPE_HASH)
                return ERR_PTR(-EINVAL);

        offmap = kzalloc(sizeof(*offmap), GFP_USER);
        if (!offmap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&offmap->map, attr);

        rtnl_lock();
        down_write(&bpf_devs_lock);
        offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
        err = bpf_dev_offload_check(offmap->netdev);
        if (err)
                goto err_unlock;

        err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
        if (err)
                goto err_unlock;

        list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
        up_write(&bpf_devs_lock);
        rtnl_unlock();

        return &offmap->map;

err_unlock:
        up_write(&bpf_devs_lock);
        rtnl_unlock();
        kfree(offmap);
        return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
        WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
        /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
        bpf_map_free_id(&offmap->map, true);
        list_del_init(&offmap->offloads);
        offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);

        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (offmap->netdev)
                __bpf_map_offload_destroy(offmap);
        up_write(&bpf_devs_lock);
        rtnl_unlock();

        kfree(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
                                void *key, void *value, u64 flags)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        if (unlikely(flags > BPF_EXIST))
                return -EINVAL;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_update_elem(offmap, key, value,
                                                       flags);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_delete_elem(offmap, key);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
        up_read(&bpf_devs_lock);

        return ret;
}
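
/* Hedged sketch (hypothetical "foo" driver, for illustration only): the
 * dev_ops dispatched above are supplied by the driver when it services
 * BPF_OFFLOAD_MAP_ALLOC, along the lines of:
 *
 *        static const struct bpf_map_dev_ops foo_map_dev_ops = {
 *                .map_get_next_key       = foo_map_get_next_key,
 *                .map_lookup_elem        = foo_map_lookup_elem,
 *                .map_update_elem        = foo_map_update_elem,
 *                .map_delete_elem        = foo_map_delete_elem,
 *        };
 *
 * with offmap->dev_ops = &foo_map_dev_ops; set in the MAP_ALLOC handler.
 */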

struct ns_get_path_bpf_map_args {
        struct bpf_offloaded_map *offmap;
        struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
        struct ns_get_path_bpf_map_args *args = private_data;
        struct ns_common *ns;
        struct net *net;

        rtnl_lock();
        down_read(&bpf_devs_lock);

        if (args->offmap->netdev) {
                args->info->ifindex = args->offmap->netdev->ifindex;
                net = dev_net(args->offmap->netdev);
                get_net(net);
                ns = &net->ns;
        } else {
                args->info->ifindex = 0;
                ns = NULL;
        }

        up_read(&bpf_devs_lock);
        rtnl_unlock();

        return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
        struct ns_get_path_bpf_map_args args = {
                .offmap = map_to_offmap(map),
                .info = info,
        };
        struct inode *ns_inode;
        struct path ns_path;
        void *res;

        res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
        if (IS_ERR(res)) {
                if (!info->ifindex)
                        return -ENODEV;
                return PTR_ERR(res);
        }

        ns_inode = ns_path.dentry->d_inode;
        info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
        info->netns_ino = ns_inode->i_ino;
        path_put(&ns_path);

        return 0;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap;
        struct bpf_prog_offload *offload;
        bool ret;

        if (!bpf_prog_is_dev_bound(prog->aux))
                return false;
        if (!bpf_map_is_dev_bound(map))
                return bpf_map_offload_neutral(map);

        down_read(&bpf_devs_lock);
        offload = prog->aux->offload;
        offmap = map_to_offmap(map);

        ret = offload && offload->netdev == offmap->netdev;
        up_read(&bpf_devs_lock);

        return ret;
}
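
/* The verifier relies on this check to ensure a device-bound program only
 * references maps offloaded to the same netdev (device-neutral map types,
 * per bpf_map_offload_neutral() as used above, are the exception).
 */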

static void bpf_offload_orphan_all_progs(struct net_device *netdev)
{
        struct bpf_prog_offload *offload, *tmp;

        list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
                if (offload->netdev == netdev)
                        __bpf_prog_offload_destroy(offload->prog);
}

static void bpf_offload_orphan_all_maps(struct net_device *netdev)
{
        struct bpf_offloaded_map *offmap, *tmp;

        list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
                if (offmap->netdev == netdev)
                        __bpf_map_offload_destroy(offmap);
}

static int bpf_offload_notification(struct notifier_block *notifier,
                                    ulong event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        ASSERT_RTNL();

        switch (event) {
        case NETDEV_UNREGISTER:
                /* ignore namespace changes */
                if (netdev->reg_state != NETREG_UNREGISTERING)
                        break;

                down_write(&bpf_devs_lock);
                bpf_offload_orphan_all_progs(netdev);
                bpf_offload_orphan_all_maps(netdev);
                up_write(&bpf_devs_lock);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
        .notifier_call = bpf_offload_notification,
};

static int __init bpf_offload_init(void)
{
        register_netdevice_notifier(&bpf_offload_notifier);
        return 0;
}

subsys_initcall(bpf_offload_init);