// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;

	return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
	       eatype == BPF_MODIFY_RETURN;
}

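/* Allocate one writable and executable page to hold a JIT-ed image. */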
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

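/* Find an existing trampoline for @key in the hash table and take a
 * reference, or allocate and insert a new one with refcnt == 1.
 * Returns NULL on allocation failure.
 */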
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

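/* Pin the module that contains the attach address so it cannot be
 * unloaded while a trampoline is registered on one of its functions.
 */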
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

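/* Returns 1 if @ip is managed by ftrace (an fentry call site), 0 if it is
 * not, or a negative error if ftrace reports an unexpected address.
 */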
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

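/* Collect the progs attached to each trampoline slot (fentry, fmod_ret,
 * fexit) into a kcalloc()'ed bpf_tramp_progs array for the arch code.
 * *total is set to the overall number of progs and *ip_arg is set if any
 * of them needs the IP argument (bpf_get_func_ip).
 */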
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= aux->prog->call_get_func_ip;
			*progs++ = aux->prog;
		}
	}
	return tprogs;
}

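/* Final teardown step for a trampoline image: remove its ksym, free the
 * executable page, uncharge the JIT memory and free the image struct.
 * Runs from a workqueue so it is allowed to sleep.
 */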
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In !PREEMPT case the task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call original
	 * function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

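/* Allocate a new trampoline image: charge JIT memory, grab an executable
 * page, set up the percpu_ref used to track the trampoline while it calls
 * the original function, and publish it as a bpf_trampoline_<key>_<idx>
 * ksym.
 */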
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

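/* Re-generate the trampoline image to match the currently attached progs.
 * Called with tr->mutex held whenever a prog is linked or unlinked. With
 * no progs left, the fentry/jump patch is removed and the image released.
 */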
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

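/* Get (or create) the trampoline for @key and, on first use, fill in the
 * target function model and address from @tgt_info.
 */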
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

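/* __bpf_prog_enter* returns 0 to mean "skip the prog", so the start
 * timestamp is clamped to NO_START_TIME (1) to keep it distinguishable
 * from "skip" even when sched_clock() happens to return 0.
 */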
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

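/* Count a recursion miss: the prog was skipped because it was already
 * running on this CPU (prog->active != 1). Recorded in the prog's
 * per-CPU stats.
 */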
static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned int flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
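/* Roughly, for each attached prog the generated trampoline behaves like
 * the following C (a sketch only; the real sequence is emitted by
 * arch_prepare_bpf_trampoline() and is arch-specific):
 *
 *	u64 start = __bpf_prog_enter(prog);
 *	if (start)
 *		prog->bpf_func(ctx, prog->insnsi);
 *	__bpf_prog_exit(prog, start);
 */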
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

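/* Sleepable counterparts: protected by rcu_read_lock_trace() instead of
 * rcu_read_lock(), since sleepable progs may fault and block.
 */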
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

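/* Called by the generated trampoline around the call to the original
 * function: the percpu_ref keeps the trampoline image alive while the
 * original function may still return into it.
 */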
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

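/* Weak stub for architectures without trampoline support; arch code that
 * implements BPF trampolines overrides this and emits the actual image.
 */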
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);