// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
static struct latch_tree_root image_tree __cacheline_aligned;

/* serializes access to trampoline_table and image_tree */
static DEFINE_MUTEX(trampoline_mutex);

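/* Allocate a single page, mark it executable and keep it writable for
 * later trampoline updates.
 */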
static void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

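/* All image pages are tracked in a latch tree so that
 * is_bpf_image_address() can answer "does this address belong to a BPF
 * image?" locklessly under RCU, e.g. from the stack unwinder.
 */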
static __always_inline bool image_tree_less(struct latch_tree_node *a,
					    struct latch_tree_node *b)
{
	struct bpf_image *ia = container_of(a, struct bpf_image, tnode);
	struct bpf_image *ib = container_of(b, struct bpf_image, tnode);

	return ia < ib;
}

static __always_inline int image_tree_comp(void *addr, struct latch_tree_node *n)
{
	void *image = container_of(n, struct bpf_image, tnode);

	if (addr < image)
		return -1;
	if (addr >= image + PAGE_SIZE)
		return 1;

	return 0;
}

static const struct latch_tree_ops image_tree_ops = {
	.less = image_tree_less,
	.comp = image_tree_comp,
};

static void *__bpf_image_alloc(bool lock)
{
	struct bpf_image *image;

	image = bpf_jit_alloc_exec_page();
	if (!image)
		return NULL;

	if (lock)
		mutex_lock(&trampoline_mutex);
	latch_tree_insert(&image->tnode, &image_tree, &image_tree_ops);
	if (lock)
		mutex_unlock(&trampoline_mutex);
	return image->data;
}

void *bpf_image_alloc(void)
{
	return __bpf_image_alloc(true);
}

bool is_bpf_image_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = latch_tree_find((void *) addr, &image_tree, &image_tree_ops) != NULL;
	rcu_read_unlock();

	return ret;
}

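/* Look up the trampoline for @key or create it on first use. Each
 * successful call takes a reference that the caller drops with
 * bpf_trampoline_put(). Typical flow:
 *
 *	tr = bpf_trampoline_lookup(key);
 *	bpf_trampoline_link_prog(prog);    -> bpf_trampoline_update(tr)
 *	...
 *	bpf_trampoline_unlink_prog(prog);  -> bpf_trampoline_update(tr)
 *	bpf_trampoline_put(tr);
 */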
struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	void *image;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
	image = __bpf_image_alloc(false);
	if (!image) {
		kfree(tr);
		tr = NULL;
		goto out;
	}

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
	tr->image = image;
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

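/* Returns 1 if @ip is an ftrace patch site, 0 if it is not, and -EFAULT
 * if ftrace covers the function but @ip does not point exactly at the
 * patchable instruction.
 */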
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

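/* The helpers below patch the call site at tr->func.addr: through the
 * ftrace direct-call API when the site is managed by ftrace, otherwise
 * by poking the instruction directly via bpf_arch_text_poke().
 */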
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	return ret;
}

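/* Snapshot the programs attached to @tr into a bpf_tramp_progs array,
 * one slot per kind, for arch_prepare_bpf_trampoline(). *total is set
 * to the overall number of programs.
 */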
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
			*progs++ = aux->prog;
	}
	return tprogs;
}

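/* Regenerate the trampoline. The image page holds two trampoline halves;
 * the parity of tr->selector picks which half is currently idle. New code
 * is emitted into the idle half and the fentry call site is switched over
 * to it, so the live half is never modified in place.
 */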
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	void *old_image = tr->image + ((tr->selector + 1) & 1) * BPF_IMAGE_SIZE / 2;
	void *new_image = tr->image + (tr->selector & 1) * BPF_IMAGE_SIZE / 2;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, old_image);
		tr->selector = 0;
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	/* Though the second half of the trampoline page is unused, a task
	 * could be preempted in the middle of the first half and two updates
	 * to the trampoline would change the code from underneath the
	 * preempted task. Hence wait for tasks to voluntarily schedule or go
	 * to userspace.
	 */
	synchronize_rcu_tasks();

	err = arch_prepare_bpf_trampoline(new_image, new_image + BPF_IMAGE_SIZE / 2,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	if (tr->selector)
		/* progs already running at this address */
		err = modify_fentry(tr, old_image, new_image);
	else
		/* first time registering */
		err = register_fentry(tr, new_image);
	if (err)
		goto out;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

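/* Map the UAPI attach type to a trampoline slot. Anything that is not
 * fentry/fexit/fmod_ret is treated as an extension (freplace) program.
 */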
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t)
{
	switch (t) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err = 0;
	int cnt;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(prog->aux->trampoline);
	if (err) {
		hlist_del(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(prog->aux->trampoline);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

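/* Drop a reference to @tr. The last put removes the image from the
 * latch tree and frees it once all tasks have left the trampoline.
 */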
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	struct bpf_image *image;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	image = container_of(tr->image, struct bpf_image, data);
	latch_tree_erase(&image->tnode, &image_tree, &image_tree_ops);
	/* wait for tasks to get out of trampoline before freeing it */
	synchronize_rcu_tasks();
	bpf_jit_free_exec(image);
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

/* The logic is similar to BPF_PROG_RUN, but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
u64 notrace __bpf_prog_enter(void)
{
	u64 start = 0;

	rcu_read_lock();
	migrate_disable();
	if (static_branch_unlikely(&bpf_stats_enabled_key))
		start = sched_clock();
	return start;
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter
	     * and disabled in __bpf_prog_exit.
	     * And vice versa.
	     * Hence check that 'start' is not zero.
	     */
	    start) {
		stats = this_cpu_ptr(prog->aux->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
	migrate_enable();
	rcu_read_unlock();
}

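/* Weak stub; architectures that support BPF trampolines provide their
 * own implementation.
 */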
int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);