// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;

	return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
	       eatype == BPF_MODIFY_RETURN;
}

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

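/* Find the trampoline keyed by @key in the global hash table and take a
 * reference on it, or allocate, initialize and insert a new one if none
 * exists yet. Returns NULL only if the allocation fails.
 */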
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

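/* Pin the module (if any) that contains the traced function so it cannot be
 * unloaded while a trampoline is attached to one of its functions.
 */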
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

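/* Returns 1 if @ip is an ftrace-managed location (attach/detach go through
 * the ftrace direct call API), 0 if it is not (bpf_arch_text_poke() is used
 * instead), or -EFAULT if ftrace reports a mismatching address.
 */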
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

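/* Collect the programs currently attached to @tr into a freshly allocated
 * bpf_tramp_progs array, one slot per attach kind (fentry, fmod_ret, fexit).
 * *total is set to the overall number of programs and *ip_arg is set when any
 * of them needs the traced function's address (BPF_TRAMP_F_IP_ARG).
 */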
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= aux->prog->call_get_func_ip;
			*progs++ = aux->prog;
		}
	}
	return tprogs;
}

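/* Final teardown of a trampoline image: drop its kallsyms entry, free the
 * executable page, return the module memory charge and free the struct.
 * Deferred to a workqueue because it is scheduled from RCU callback context.
 */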
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function relies on:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect the trampoline itself
	 * rcu tasks to protect the trampoline asm not covered by percpu_ref
	 * (the few asm insns before __bpf_tramp_enter and
	 * after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait for the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPT case the task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call original
	 * function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

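/* Allocate a new trampoline image: one executable page plus the bookkeeping
 * around it (a percpu_ref tracking whether the image is still in use and a
 * ksym entry so the trampoline shows up in kallsyms and perf).
 */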
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

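/* Rebuild the trampoline for the current set of attached programs: generate a
 * fresh image with arch_prepare_bpf_trampoline() and re-point the traced
 * function at it, or unregister the fentry entirely when no programs are
 * left. Called with tr->mutex held.
 */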
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, so we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

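/* Attach @prog to the trampoline @tr. For BPF_TRAMP_REPLACE (extension progs)
 * the target function is patched to jump straight to the new program; for
 * fentry/fmod_ret/fexit the program is added to the per-kind list and the
 * trampoline image is regenerated.
 */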
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

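/* Look up (or create) the trampoline for @key and, on first use, record the
 * target function's model and address from the attach target info.
 */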
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

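/* Account a recursion miss: the program was skipped because it was already
 * active on this CPU (see prog->active in __bpf_prog_enter*).
 */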
static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned int flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

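/* Sleepable counterparts of __bpf_prog_enter/exit: sleepable programs may
 * fault, so they run under rcu_read_lock_trace() instead of rcu_read_lock().
 */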
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

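/* Called from the generated trampoline code itself, around the call to the
 * original function, to keep the trampoline image alive via its percpu_ref.
 */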
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

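/* Weak default for architectures without trampoline support; JITs that do
 * support it provide their own implementation.
 */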
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);