// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

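/* Common run loop for BPF_PROG_TEST_RUN.  Runs @prog against @ctx @repeat
 * times (at least once) under RCU with migration disabled, reports the last
 * run's return code via @retval and the average per-run time in nanoseconds
 * via @time.  Pending signals abort the loop with -EINTR, and reschedule
 * requests temporarily drop the locks, so large repeat counts cannot
 * monopolize the CPU.
 */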
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

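/* Copy the results back to user space.  data_size_out is always updated to
 * the full output size; if a non-zero data_size_out was supplied and the
 * output is larger, the copy is clamped to it and -ENOSPC is returned so
 * the caller can detect truncation.
 *
 * For reference, a user space caller reaches this path roughly as follows
 * (illustrative sketch only; error handling and the ptr_to_u64() helper are
 * assumed):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd       = prog_fd;
 *	attr.test.data_in       = ptr_to_u64(pkt);
 *	attr.test.data_size_in  = pkt_len;
 *	attr.test.data_out      = ptr_to_u64(out);
 *	attr.test.data_size_out = sizeof(out);
 *	attr.test.repeat        = 1000;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On success attr.test.retval holds the program's return value and
 * attr.test.duration the average run time in nanoseconds.
 */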
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. 7+ arguments can be
 * supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

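/* Allocate and fill the test data buffer.  data_size_in bytes are copied
 * from user space after @headroom bytes of padding, with @tailroom bytes
 * reserved at the end, so callers can build an skb or xdp_buff directly
 * around the returned allocation.
 */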
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

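/* Test run for fentry/fexit and fmod_ret programs.  Instead of running the
 * program on a packet, the bpf_fentry_test*() and bpf_modify_return_test()
 * functions above are invoked with fixed arguments; the program under test
 * is expected to be attached to them.  For BPF_MODIFY_RETURN the reported
 * retval packs bpf_modify_return_test()'s return code into the low 16 bits
 * and sets bit 16 when the function body actually ran and modified *b,
 * i.e. the call was not short-circuited by the attached program.
 */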
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

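/* Copy in the optional user-supplied context (ctx_in/ctx_size_in).  The
 * buffer is zero-filled to @max_size, and bpf_check_uarg_tail_zero()
 * rejects a context larger than the kernel's struct unless the excess
 * bytes are all zero, matching how other bpf() commands treat struct size
 * mismatches.
 */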
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

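/* Only a subset of struct __sk_buff may be passed in as ctx_in to
 * bpf_prog_test_run_skb(): mark, priority, ifindex, cb[], tstamp,
 * wire_len, gso_segs and gso_size.  The offset-range checks below require
 * every other field to be zero before the values are applied to the
 * freshly built skb.
 */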
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

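/* Test run for skb-based program types (SCHED_CLS/SCHED_ACT and the LWT
 * hooks).  The user packet is wrapped in a real skb with a dummy socket
 * attached and is attributed to the loopback device, or to ctx->ifindex
 * when a context with ifindex > 1 is supplied, so helpers that inspect
 * skb->sk or skb->dev behave much as they would on a received packet.
 * After the run, the (possibly modified) packet and __sk_buff context are
 * copied back to user space.
 */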
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, net);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

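/* Test run for XDP programs.  The packet is laid out as in a full 4K page:
 * XDP_PACKET_HEADROOM in front and skb_shared_info-sized tailroom behind,
 * matching what most native XDP drivers provide, so head and tail
 * adjustments by the program have room to work with.  Unlike the skb path,
 * no ctx_in/ctx_out is supported here.
 */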
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + headroom;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;
	xdp.frame_sz = headroom + max_data_sz + tailroom;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

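/* For flow dissector test runs the optional user context is a struct
 * bpf_flow_keys in which only the flags field may be non-zero; it is
 * passed to bpf_flow_dissect() as the dissection flags.
 */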
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

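/* Test run for BPF_PROG_TYPE_FLOW_DISSECTOR.  The raw packet (which must at
 * least hold an Ethernet header) is dissected @repeat times, with the same
 * signal and resched handling as bpf_test_run(), and the resulting
 * struct bpf_flow_keys is returned as the output data.
 */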
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}