// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

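/* Illustrative only, not part of this file upstream: the runner above is
 * reached from user space through the BPF_PROG_TEST_RUN command of the
 * bpf(2) syscall. A minimal sketch of a caller, assuming prog_fd is a
 * loaded program and pkt/pkt_len describe an input frame, might be:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.repeat = 1000;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On success, attr.test.retval holds the program's return value from the
 * last iteration and attr.test.duration the mean runtime per iteration in
 * nanoseconds, as computed by the do_div() above.
 */
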
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

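/* Note on the helper above: kattr->test.data_size_out doubles as an input
 * hint (capacity of the user buffer) and an output (the full size of the
 * result). When the result exceeds the hint, the copy is truncated and
 * -ENOSPC is returned so callers can detect the short read; a zero hint
 * keeps the historical copy-everything behaviour.
 */
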
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

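/* bpf_ctx_init() below pairs with bpf_check_uarg_tail_zero(): user space
 * may pass in a context that is larger than the kernel's struct as long
 * as every byte past max_size is zero, which lets newer user space keep
 * working on older kernels in the usual BPF syscall fashion.
 */
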
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

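/* bpf_ctx_finish() mirrors bpf_test_finish() for the context buffer: the
 * copy out is clamped to ctx_size_out, the full size is still reported
 * back, and -ENOSPC flags a truncated copy.
 */
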
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

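/* The conversion below whitelists the few struct __sk_buff fields that a
 * test run may set (priority, cb and tstamp at this point) and rejects a
 * context with anything else non-zero. That way, passing a field this
 * code does not yet handle fails loudly with -EINVAL instead of being
 * silently dropped.
 */
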
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
			   FIELD_SIZEOF(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
			   FIELD_SIZEOF(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
			   FIELD_SIZEOF(struct __sk_buff, tstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->priority = skb->priority;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}

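/* Illustrative only: a sketch of a caller exercising the ctx path of the
 * skb test run below, assuming a loaded BPF_PROG_TYPE_SCHED_CLS program
 * and an attr prepared as in the earlier example:
 *
 *	struct __sk_buff ctx = {};
 *
 *	ctx.priority = 2;
 *	ctx.cb[0] = 1;
 *	attr.test.ctx_in = (__u64)(unsigned long)&ctx;
 *	attr.test.ctx_size_in = sizeof(ctx);
 *	attr.test.ctx_out = (__u64)(unsigned long)&ctx;
 *	attr.test.ctx_size_out = sizeof(ctx);
 *
 * After the run, convert_skb_to___skb() has written back the (possibly
 * updated) priority, tstamp and cb[] values for inspection.
 */
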
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

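/* For XDP the setup below is leaner: the input frame is laid out with
 * XDP_PACKET_HEADROOM plus NET_IP_ALIGN in front, ctx_in/ctx_out are
 * rejected, and a loopback rx queue provides the mandatory xdp.rxq. If
 * the program moved data or data_end (e.g. via bpf_xdp_adjust_head()),
 * the adjusted length is what gets copied back to user space.
 */
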
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}

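/* Same whitelist idea as convert___skb_to_skb(): in a user-supplied
 * struct bpf_flow_keys only the flags field may be non-zero.
 */
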
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetof(struct bpf_flow_keys, flags) +
			   FIELD_SIZEOF(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

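/* The flow dissector runner below open-codes the same timed loop as
 * bpf_test_run() but drives bpf_flow_dissect() directly, since the
 * program consumes raw packet bytes rather than an skb or xdp_buff. The
 * user ctx, when present, only contributes dissection flags (for example
 * BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG from the uapi); the resulting flow
 * keys are returned through bpf_test_finish().
 */
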
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}