// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

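/* Run @prog @repeat times on @ctx and report the average runtime in @time.
 * Cgroup storage is allocated once up front and set before every iteration.
 * The program runs under rcu_read_lock() with migration disabled; both are
 * dropped around cond_resched() so long test runs stay preemptible.
 */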
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

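/* Copy the test output (packet data, its size, the program return value and
 * the measured duration) back to the user buffers described by @kattr/@uattr.
 * Returns -ENOSPC when the caller's output buffer was too small for the data.
 */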
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

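/* Allocate a kernel buffer with the requested headroom/tailroom and copy the
 * user-supplied test packet into it.  Packets smaller than an Ethernet header
 * or too large to fit in a single page are rejected.
 */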
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

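/* Test runner for fentry/fexit and fmod_ret programs.  The program under test
 * is expected to be attached to the bpf_fentry_test*() or
 * bpf_modify_return_test() functions above, which are called here with known
 * arguments.  The reported retval packs a side-effect flag in the upper
 * 16 bits and the (possibly modified) return value in the lower 16 bits.
 */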
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

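/* Copy an optional user-supplied context object (e.g. struct __sk_buff or
 * struct bpf_flow_keys) into a zeroed kernel buffer of @max_size bytes.
 * Returns NULL when no context was supplied and an ERR_PTR on failure.
 */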
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

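/* Copy the (possibly updated) context object back to user space, clamping to
 * the caller's ctx_size_out and reporting -ENOSPC if it did not fit.
 */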
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if every byte in the buf in the range
 * [from, to) is zero.
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

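/* Apply the writable fields of a user-provided struct __sk_buff to the real
 * skb used for the test run.  Every field that test_run does not support must
 * be zero, which is enforced range by range around the allowed members.
 */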
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

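/* Mirror the supported skb fields back into the user-visible __sk_buff after
 * the program has run.
 */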
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

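/* BPF_PROG_TEST_RUN handler for skb-based program types: build a dummy socket
 * and sk_buff around the user-supplied packet, optionally seed it from a
 * struct __sk_buff context, run the program and copy the resulting packet,
 * context and return value back to user space.
 */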
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

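/* BPF_PROG_TEST_RUN handler for XDP programs: the packet is laid out in a
 * page-sized buffer with XDP_PACKET_HEADROOM in front and skb_shared_info
 * tailroom behind it, and the loopback device's queue 0 serves as the rxq.
 */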
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz)
		return -EINVAL;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + headroom;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;
	xdp.frame_sz = headroom + max_data_sz + tailroom;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

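/* Only the flags member of a user-supplied struct bpf_flow_keys may be
 * non-zero on input; everything else is output-only.
 */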
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

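/* BPF_PROG_TEST_RUN handler for flow dissector programs: run the dissector
 * @repeat times over the supplied packet, starting past the Ethernet header,
 * and return the resulting flow keys together with the average runtime.
 */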
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}