/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

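/* Run @prog against @ctx @repeat times (at least once) under RCU with
 * preemption disabled, yielding via cond_resched() with the clock stopped
 * whenever a reschedule is needed.  The program's final return code is
 * reported through @retval and the mean runtime per iteration, in
 * nanoseconds, through @time.  Returns 0, -EINTR if a signal interrupted
 * the loop, or -ENOMEM if cgroup storage could not be allocated.
 */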
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			/* Stop the clock while yielding the CPU. */
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	/* Report the mean runtime per iteration, capped at U32_MAX ns. */
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

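/* Copy the (possibly rewritten) packet @data, its @size, the program's
 * @retval and the measured @duration back to the user's bpf_attr.  The
 * packet copy is clamped to the user's data_size_out hint, in which case
 * -ENOSPC is returned; data_size_out is always set to the full size.
 */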
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

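/* Allocate a test buffer with the requested head- and tailroom and fill
 * it from user memory.  The payload must be at least one Ethernet header
 * and must fit in a single page together with the reserved space.
 */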
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

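/* BPF_PROG_TEST_RUN handler for skb-based program types: builds a linear
 * skb around the user-supplied packet, owned by a dummy socket in the
 * caller's netns and tied to its loopback device, runs the program, and
 * copies the resulting packet back out.
 */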
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* Back the skb with a dummy socket in the caller's netns so that
	 * programs dereferencing skb->sk see valid data.
	 */
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	/* eth_type_trans() pulled the mac header; L2 programs expect it. */
	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret) {
		kfree_skb(skb);
		kfree(sk);
		return ret;
	}
	if (!is_l2) {
		/* Put back (zeroed) room for the mac header, growing headroom
		 * if the program consumed it, so the copy-out below starts at
		 * the mac header again.
		 */
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}

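/* BPF_PROG_TEST_RUN handler for XDP: wraps the user buffer in an xdp_buff
 * with the canonical XDP_PACKET_HEADROOM, attaches loopback's rx queue 0
 * as the rxq, and copies out whatever [data, data_end) window the program
 * leaves behind.
 */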
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	/* The program may have moved data/data_end (e.g. via
	 * bpf_xdp_adjust_head()/bpf_xdp_adjust_tail()); report the new length.
	 */
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}
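
/* For reference, a minimal userspace sketch of exercising these handlers
 * through libbpf's wrapper from the same era as this file; `prog_fd` is
 * an already-loaded program fd and `pkt`/`out` are caller-owned buffers
 * (names here are illustrative, not part of this file):
 *
 *	struct bpf_prog_test_run_attr tattr = {
 *		.prog_fd	= prog_fd,
 *		.repeat		= 1000,
 *		.data_in	= pkt,
 *		.data_size_in	= sizeof(pkt),	// >= ETH_HLEN, fits in a page
 *		.data_out	= out,
 *		.data_size_out	= sizeof(out),	// size hint; see bpf_test_finish()
 *	};
 *	int err = bpf_prog_test_run_xattr(&tattr);
 *	// on success: tattr.retval, tattr.duration (mean ns per run),
 *	// tattr.data_size_out (full output size, even if the copy was clamped)
 */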