/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

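/* Run the program once against @ctx: the per-cpu cgroup storage pointers are
 * set before BPF_PROG_RUN(), and the program executes with preemption
 * disabled under rcu_read_lock().
 */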
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

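/* Invoke the program @repeat times (at least once) against @ctx, allocating
 * and freeing cgroup storage around the run.  *ret holds the return value of
 * the last invocation and *time the average duration per run in nanoseconds,
 * clamped to U32_MAX.  When a reschedule is needed the loop bails out on a
 * pending signal, and the cond_resched() itself is excluded from the timing.
 */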
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
			u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		*ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return 0;
}

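/* Copy the results back to userspace: the (possibly modified) test data, its
 * size, the program's return value and the measured duration.  The data copy
 * is clamped to the caller-supplied data_size_out, and -ENOSPC is returned
 * when that output buffer was too small for the full data.
 */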
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

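/* Allocate a kernel buffer with the requested headroom and tailroom and copy
 * the user-supplied test input into it.  The input must be at least ETH_HLEN
 * bytes and fit into a single page including the reserved space.
 */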
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

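/* BPF_PROG_TEST_RUN handler for skb-based program types: the user-supplied
 * packet is wrapped in a freshly built skb owned by a dummy socket in the
 * caller's netns, L2 and direct-packet-access setup follows the program
 * type, and the resulting skb contents are copied back after the run.
 *
 * A minimal userspace sketch (assuming libbpf's bpf_prog_test_run() wrapper;
 * names outside this file are illustrative only):
 *
 *	__u32 retval, duration, out_size = sizeof(out);
 *	int err = bpf_prog_test_run(prog_fd, 1, pkt, sizeof(pkt),
 *				    out, &out_size, &retval, &duration);
 */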
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret) {
		kfree_skb(skb);
		kfree(sk);
		return ret;
	}
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}

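/* BPF_PROG_TEST_RUN handler for XDP programs: the input is laid out as an
 * xdp_buff with XDP_PACKET_HEADROOM plus NET_IP_ALIGN of headroom, with no
 * metadata, and bound to queue 0 of the loopback device in the caller's
 * netns so that rxq lookups and head/meta adjustments have room to work.
 */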
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}

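/* BPF_PROG_TEST_RUN handler for flow dissector programs: the input packet is
 * wrapped in an skb much like in bpf_prog_test_run_skb(), but the program is
 * run through __skb_flow_bpf_dissect() with its own timing loop, and the
 * resulting struct bpf_flow_keys is what gets copied back to userspace.
 */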
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	struct bpf_skb_data_end *cb;
	u32 retval, duration;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	sk = kzalloc(sizeof(*sk), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb,
				       current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	cb = (struct bpf_skb_data_end *)skb->cb;
	cb->qdisc_cb.flow_keys = &flow_keys;

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		preempt_disable();
		rcu_read_lock();
		retval = __skb_flow_bpf_dissect(prog, skb,
						&flow_keys_dissector,
						&flow_keys);
		rcu_read_unlock();
		preempt_enable();

		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

	kfree_skb(skb);
	kfree(sk);
	return ret;
}