/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
15
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070016#include <linux/bpf.h>
17#include <linux/bpf_verifier.h>
18#include <linux/bug.h>
19#include <linux/list.h>
20#include <linux/netdevice.h>
21#include <linux/printk.h>
22#include <linux/rtnetlink.h>
Jakub Kicinskie0d39742017-12-27 18:39:03 -080023#include <linux/rwsem.h>
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070024
/* Serializes changes to bpf_prog_offload_devs and to the ->offload
 * member of every program.  Lock ordering: this rwsem must never be
 * taken while already holding RTNL (RTNL may be taken under it).
 */
static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
31int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
32{
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070033 struct bpf_dev_offload *offload;
34
Jakub Kicinski649f11d2017-11-20 15:21:52 -080035 if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
36 attr->prog_type != BPF_PROG_TYPE_XDP)
37 return -EINVAL;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070038
39 if (attr->prog_flags)
40 return -EINVAL;
41
42 offload = kzalloc(sizeof(*offload), GFP_USER);
43 if (!offload)
44 return -ENOMEM;
45
46 offload->prog = prog;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070047
Jakub Kicinskie0d39742017-12-27 18:39:03 -080048 offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
49 attr->prog_ifindex);
50 if (!offload->netdev)
51 goto err_free;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070052
Jakub Kicinskie0d39742017-12-27 18:39:03 -080053 down_write(&bpf_devs_lock);
54 if (offload->netdev->reg_state != NETREG_REGISTERED)
55 goto err_unlock;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070056 prog->aux->offload = offload;
57 list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
Jakub Kicinskie0d39742017-12-27 18:39:03 -080058 dev_put(offload->netdev);
59 up_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070060
61 return 0;
Jakub Kicinskie0d39742017-12-27 18:39:03 -080062err_unlock:
63 up_write(&bpf_devs_lock);
64 dev_put(offload->netdev);
65err_free:
66 kfree(offload);
67 return -EINVAL;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070068}
69
70static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
71 struct netdev_bpf *data)
72{
Jakub Kicinskice3b9db2017-12-27 18:39:06 -080073 struct bpf_dev_offload *offload = prog->aux->offload;
74 struct net_device *netdev;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070075
76 ASSERT_RTNL();
77
Jakub Kicinskice3b9db2017-12-27 18:39:06 -080078 if (!offload)
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070079 return -ENODEV;
Jakub Kicinskice3b9db2017-12-27 18:39:06 -080080 netdev = offload->netdev;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070081 if (!netdev->netdev_ops->ndo_bpf)
82 return -EOPNOTSUPP;
83
84 data->command = cmd;
85
86 return netdev->netdev_ops->ndo_bpf(netdev, data);
87}
88
89int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
90{
91 struct netdev_bpf data = {};
92 int err;
93
94 data.verifier.prog = env->prog;
95
96 rtnl_lock();
97 err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
98 if (err)
99 goto exit_unlock;
100
Jakub Kicinskicae19272017-12-27 18:39:05 -0800101 env->prog->aux->offload->dev_ops = data.verifier.ops;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700102 env->prog->aux->offload->dev_state = true;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700103exit_unlock:
104 rtnl_unlock();
105 return err;
106}
107
Jakub Kicinskicae19272017-12-27 18:39:05 -0800108int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
109 int insn_idx, int prev_insn_idx)
110{
111 struct bpf_dev_offload *offload;
112 int ret = -ENODEV;
113
114 down_read(&bpf_devs_lock);
115 offload = env->prog->aux->offload;
Jakub Kicinskice3b9db2017-12-27 18:39:06 -0800116 if (offload)
Jakub Kicinskicae19272017-12-27 18:39:05 -0800117 ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
118 up_read(&bpf_devs_lock);
119
120 return ret;
121}
122
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700123static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
124{
125 struct bpf_dev_offload *offload = prog->aux->offload;
126 struct netdev_bpf data = {};
127
128 data.offload.prog = prog;
129
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700130 if (offload->dev_state)
131 WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
132
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700133 list_del_init(&offload->offloads);
Jakub Kicinskice3b9db2017-12-27 18:39:06 -0800134 kfree(offload);
135 prog->aux->offload = NULL;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700136}
137
138void bpf_prog_offload_destroy(struct bpf_prog *prog)
139{
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700140 rtnl_lock();
Jakub Kicinskie0d39742017-12-27 18:39:03 -0800141 down_write(&bpf_devs_lock);
Jakub Kicinskice3b9db2017-12-27 18:39:06 -0800142 if (prog->aux->offload)
143 __bpf_prog_offload_destroy(prog);
Jakub Kicinskie0d39742017-12-27 18:39:03 -0800144 up_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700145 rtnl_unlock();
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700146}
147
148static int bpf_prog_offload_translate(struct bpf_prog *prog)
149{
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700150 struct netdev_bpf data = {};
151 int ret;
152
153 data.offload.prog = prog;
154
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700155 rtnl_lock();
156 ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
157 rtnl_unlock();
158
159 return ret;
160}
161
/* Stand-in bpf_func for offloaded programs: the image only exists on the
 * device, so any host-side invocation is a bug — warn loudly and bail.
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
168
169int bpf_prog_offload_compile(struct bpf_prog *prog)
170{
171 prog->bpf_func = bpf_prog_warn_on_exec;
172
173 return bpf_prog_offload_translate(prog);
174}
175
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700176const struct bpf_prog_ops bpf_offload_prog_ops = {
177};
178
179static int bpf_offload_notification(struct notifier_block *notifier,
180 ulong event, void *ptr)
181{
182 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
183 struct bpf_dev_offload *offload, *tmp;
184
185 ASSERT_RTNL();
186
187 switch (event) {
188 case NETDEV_UNREGISTER:
Jakub Kicinski62c71b42017-11-20 15:21:57 -0800189 /* ignore namespace changes */
190 if (netdev->reg_state != NETREG_UNREGISTERING)
191 break;
192
Jakub Kicinskie0d39742017-12-27 18:39:03 -0800193 down_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700194 list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
195 offloads) {
196 if (offload->netdev == netdev)
197 __bpf_prog_offload_destroy(offload->prog);
198 }
Jakub Kicinskie0d39742017-12-27 18:39:03 -0800199 up_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700200 break;
201 default:
202 break;
203 }
204 return NOTIFY_OK;
205}
206
207static struct notifier_block bpf_offload_notifier = {
208 .notifier_call = bpf_offload_notification,
209};
210
211static int __init bpf_offload_init(void)
212{
213 register_netdevice_notifier(&bpf_offload_notifier);
214 return 0;
215}
216
217subsys_initcall(bpf_offload_init);