// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

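/* Timing helper shared by the test-run handlers below: it holds the RCU read
 * lock and keeps preemption (or just migration) disabled around the measured
 * runs, and accumulates the elapsed time so an average per-run duration can
 * be reported back to user space.
 */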
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

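/* Run @prog over @ctx for @repeat iterations, reporting the return value of
 * the final run and the average run time in nanoseconds. Temporary cgroup
 * storage is set up for every storage type the program may use and freed
 * again once the runs are done.
 */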
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	do {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

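/* Copy the (possibly modified) packet data, its full size, the program's
 * return value and the measured duration back into the user's bpf_attr.
 * Returns -ENOSPC when the caller's output buffer was too small for the
 * whole packet.
 */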
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Functions with seven or
 * more arguments can be supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

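/* Allocate a zeroed test buffer with the requested headroom and tailroom
 * around @size bytes of packet data, and fill it from the user-supplied
 * data_in pointer.
 */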
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

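/* Test runner for tracing programs (fentry/fexit/fmod_ret): rather than
 * feeding the program a packet, call the bpf_fentry_test*() and
 * bpf_modify_return_test() functions above so that programs attached to
 * them are exercised, and report the outcome through uattr->test.retval.
 */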
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}

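/* Test runner for raw tracepoint programs: the user-supplied ctx_in buffer
 * is passed to the program as its context. With BPF_F_TEST_RUN_ON_CPU the
 * single run happens on the requested CPU via smp_call_function_single().
 */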
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu comes from user
		 * space, do an extra quick check to filter out an
		 * invalid value before calling smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}

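/* Copy the user-supplied program context (ctx_in) into a kernel buffer of at
 * most @max_size bytes, rejecting inputs that carry non-zero bytes beyond
 * the size the kernel knows about.
 */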
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from,to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

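/* Apply the writable fields of the user-provided struct __sk_buff to the
 * real skb before the test run, rejecting contexts that set any field the
 * test runner does not support.
 */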
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

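/* Copy the fields user space may inspect back into the struct __sk_buff. */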
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

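/* BPF_PROG_TEST_RUN handler for skb-based program types (tc, LWT, etc.):
 * build a real sk_buff around the user-supplied packet data, attach a dummy
 * socket, run the program and copy the resulting packet and __sk_buff
 * context back to user space.
 */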
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, net);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

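/* BPF_PROG_TEST_RUN handler for XDP programs: place the packet in an
 * xdp_buff with XDP_PACKET_HEADROOM and run it against the loopback
 * device's rx queue info.
 */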
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom, as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

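/* BPF_PROG_TEST_RUN handler for flow dissector programs: dissect the
 * user-supplied packet repeatedly and return the resulting bpf_flow_keys as
 * the test output.
 */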
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

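/* BPF_PROG_TEST_RUN handler for sk_lookup programs: there is no packet, only
 * a bpf_sk_lookup context supplied via ctx_in. The program runs in a
 * one-entry prog array and, if it selected a socket, that socket's cookie is
 * reported back through ctx_out.
 */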
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}