/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

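/* One instance per offload-capable device, allocated by the driver through
 * bpf_offload_dev_create().  The netdevs list tracks every netdev registered
 * against this device so that programs and maps can be matched against, or
 * migrated between, ports of the same underlying device.
 */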
struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
};

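/* Per-netdev offload state, keyed by the netdev pointer in the offdevs
 * hashtable.  Holds the offloaded programs and maps currently bound to
 * this netdev.
 */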
struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint = 4,
	.key_len = sizeof(struct net_device *),
	.key_offset = offsetof(struct bpf_offload_netdev, netdev),
	.head_offset = offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking = true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

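/* Called on BPF_PROG_LOAD when prog_ifindex is set: bind the program being
 * loaded to the target netdev, which must already be registered for offload,
 * and add it to that netdev's list of offloaded programs.
 */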
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

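/* Send an ndo_bpf command to the netdev an offloaded program is bound to.
 * Caller must hold RTNL; returns -ENODEV if the program has lost its
 * netdev binding.
 */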
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

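/* Verification entry point: ask the driver to prepare for verification of
 * this program and remember the per-instruction callbacks it hands back in
 * dev_ops.
 */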
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

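/* Called by the verifier for each instruction; forwards to the driver's
 * insn_hook so the device can veto constructs it cannot offload.
 */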
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

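/* Optional end-of-verification callback: forwarded only if the driver
 * implements finalize, otherwise treated as success.
 */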
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->dev_ops->finalize)
			ret = offload->dev_ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

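/* Release the device state of an offloaded program and unlink it from its
 * netdev.  Callers hold bpf_devs_lock for writing.
 */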
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

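/* Device-bound programs must never run on the host; bpf_func is pointed at
 * this stub so that any attempt to execute one on the host warns loudly.
 */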
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

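/* Fill the offload-related fields of bpf_prog_info: the bound netdev's
 * ifindex and network namespace (dev, inode), plus the JITed image when the
 * caller supplied a buffer for it.
 */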
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog = prog,
		.info = info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

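/* Allocate a device-bound map.  The target netdev must already be
 * registered for offload; the driver is notified via BPF_OFFLOAD_MAP_ALLOC
 * before the map is added to the netdev's map list.
 */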
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

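/* The element accessors below proxy syscall-side map operations to the
 * driver's map ops, returning -ENODEV once the map has lost its netdev.
 */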
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap = map_to_offmap(map),
		.info = info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

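/* A program matches a netdev if it is bound to that netdev, or to another
 * netdev registered to the same offload device.
 */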
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

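/* Register a netdev with an offload device so that programs and maps can be
 * offloaded to it.  Called by the driver, typically when a port netdev is
 * created.
 */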
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

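/* Unregister a netdev from its offload device.  Objects offloaded through it
 * are moved to another netdev of the same device when one exists, otherwise
 * they are destroyed.
 */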
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

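/* Create the per-device offload handle.  A driver wires this up roughly as
 * in the sketch below (illustrative only; names such as my_bpf_dev_ops are
 * placeholders and error handling is omitted):
 *
 *	bpf_dev = bpf_offload_dev_create(&my_bpf_dev_ops);
 *	if (IS_ERR(bpf_dev))
 *		return PTR_ERR(bpf_dev);
 *	err = bpf_offload_dev_netdev_register(bpf_dev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bpf_dev, netdev);
 *	bpf_offload_dev_destroy(bpf_dev);
 */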
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);