blob: 032079754d88cc64255c17a348ff420ee15b8a2b [file] [log] [blame]
Jakub Kicinskia39e17b2017-11-27 12:10:23 -08001/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree.
7 *
8 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
9 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
10 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
11 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
12 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
13 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
14 */
15
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070016#include <linux/bpf.h>
17#include <linux/bpf_verifier.h>
18#include <linux/bug.h>
19#include <linux/list.h>
20#include <linux/netdevice.h>
21#include <linux/printk.h>
22#include <linux/rtnetlink.h>
Jakub Kicinskie0d39742017-12-27 18:39:03 -080023#include <linux/rwsem.h>
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070024
/* Protects bpf_prog_offload_devs and offload members of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
/* All offloaded programs, linked via bpf_dev_offload::offloads. */
static LIST_HEAD(bpf_prog_offload_devs);
30
31int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
32{
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070033 struct bpf_dev_offload *offload;
34
Jakub Kicinski649f11d2017-11-20 15:21:52 -080035 if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
36 attr->prog_type != BPF_PROG_TYPE_XDP)
37 return -EINVAL;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070038
39 if (attr->prog_flags)
40 return -EINVAL;
41
42 offload = kzalloc(sizeof(*offload), GFP_USER);
43 if (!offload)
44 return -ENOMEM;
45
46 offload->prog = prog;
47 init_waitqueue_head(&offload->verifier_done);
48
Jakub Kicinskie0d39742017-12-27 18:39:03 -080049 offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
50 attr->prog_ifindex);
51 if (!offload->netdev)
52 goto err_free;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070053
Jakub Kicinskie0d39742017-12-27 18:39:03 -080054 down_write(&bpf_devs_lock);
55 if (offload->netdev->reg_state != NETREG_REGISTERED)
56 goto err_unlock;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070057 prog->aux->offload = offload;
58 list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
Jakub Kicinskie0d39742017-12-27 18:39:03 -080059 dev_put(offload->netdev);
60 up_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070061
62 return 0;
Jakub Kicinskie0d39742017-12-27 18:39:03 -080063err_unlock:
64 up_write(&bpf_devs_lock);
65 dev_put(offload->netdev);
66err_free:
67 kfree(offload);
68 return -EINVAL;
Jakub Kicinskiab3f0062017-11-03 13:56:17 -070069}
70
71static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
72 struct netdev_bpf *data)
73{
74 struct net_device *netdev = prog->aux->offload->netdev;
75
76 ASSERT_RTNL();
77
78 if (!netdev)
79 return -ENODEV;
80 if (!netdev->netdev_ops->ndo_bpf)
81 return -EOPNOTSUPP;
82
83 data->command = cmd;
84
85 return netdev->netdev_ops->ndo_bpf(netdev, data);
86}
87
88int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
89{
90 struct netdev_bpf data = {};
91 int err;
92
93 data.verifier.prog = env->prog;
94
95 rtnl_lock();
96 err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
97 if (err)
98 goto exit_unlock;
99
100 env->dev_ops = data.verifier.ops;
101
102 env->prog->aux->offload->dev_state = true;
103 env->prog->aux->offload->verifier_running = true;
104exit_unlock:
105 rtnl_unlock();
106 return err;
107}
108
/* Tear down the device side of an offloaded program and unlink it.
 *
 * Callers hold RTNL (required by __bpf_offload_ndo()) and bpf_devs_lock
 * for writing — see bpf_prog_offload_destroy() and
 * bpf_offload_notification().
 *
 * Caution - if netdev is destroyed before the program, this function
 * will be called twice.  It is written to be idempotent: the second
 * call sees verifier_running/dev_state already false and a self-linked
 * list node, so it does nothing further.
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	/* Let an in-flight verifier finish before destroying its state. */
	if (offload->verifier_running)
		wait_event(offload->verifier_done, !offload->verifier_running);

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	offload->dev_state = false;
	/* list_del_init() leaves the node self-linked, keeping a repeat
	 * call (netdev-died-first case) harmless.
	 */
	list_del_init(&offload->offloads);
	offload->netdev = NULL;
}
130
/* Release all offload state of @prog and free its bpf_dev_offload.
 * Called when the program itself is being destroyed.
 */
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;

	/* NOTE(review): done before taking RTNL — a waiter in
	 * __bpf_prog_offload_destroy() (netdev-notifier path) appears to
	 * sleep while holding RTNL, so it must be woken first; confirm.
	 */
	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	/* Lock order: RTNL first, then bpf_devs_lock (see lock comment
	 * at the top of the file).
	 */
	rtnl_lock();
	down_write(&bpf_devs_lock);
	__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offload);
}
146
147static int bpf_prog_offload_translate(struct bpf_prog *prog)
148{
149 struct bpf_dev_offload *offload = prog->aux->offload;
150 struct netdev_bpf data = {};
151 int ret;
152
153 data.offload.prog = prog;
154
155 offload->verifier_running = false;
156 wake_up(&offload->verifier_done);
157
158 rtnl_lock();
159 ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
160 rtnl_unlock();
161
162 return ret;
163}
164
/* Stand-in bpf_func for device-bound programs.  The image lives on the
 * device, so any attempt to run it on the host CPU is a bug: warn
 * loudly and return 0.
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
171
/* "JIT" entry point for offloaded programs: install the host-side
 * warning stub, then ask the device driver to translate the program.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	/* The host must never execute this program directly. */
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
178
/* Intentionally empty: offloaded programs execute on the device, not on
 * the host (see bpf_prog_warn_on_exec()), so no host-side prog ops are
 * provided.
 */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
181
182static int bpf_offload_notification(struct notifier_block *notifier,
183 ulong event, void *ptr)
184{
185 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
186 struct bpf_dev_offload *offload, *tmp;
187
188 ASSERT_RTNL();
189
190 switch (event) {
191 case NETDEV_UNREGISTER:
Jakub Kicinski62c71b42017-11-20 15:21:57 -0800192 /* ignore namespace changes */
193 if (netdev->reg_state != NETREG_UNREGISTERING)
194 break;
195
Jakub Kicinskie0d39742017-12-27 18:39:03 -0800196 down_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700197 list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
198 offloads) {
199 if (offload->netdev == netdev)
200 __bpf_prog_offload_destroy(offload->prog);
201 }
Jakub Kicinskie0d39742017-12-27 18:39:03 -0800202 up_write(&bpf_devs_lock);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700203 break;
204 default:
205 break;
206 }
207 return NOTIFY_OK;
208}
209
/* Hooks device-unregister events so stale offload state never outlives
 * its netdev (see bpf_offload_notification()).
 */
static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};
213
214static int __init bpf_offload_init(void)
215{
216 register_netdevice_notifier(&bpf_offload_notifier);
217 return 0;
218}
219
220subsys_initcall(bpf_offload_init);