/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>

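/* One stored stack trace. @hash caches the jhash of the trace so lookups
 * can reject mismatches cheaply, @nr is the number of valid entries in
 * the flexible @ip[] array, and @rcu defers freeing until readers are
 * done.
 */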
struct stack_map_bucket {
	struct rcu_head rcu;
	u32 hash;
	u32 nr;
	u64 ip[];
};

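/* The map proper: n_buckets is rounded up to a power of two, so a trace
 * hash can be masked with (n_buckets - 1) to pick a slot in the
 * RCU-protected buckets[] array.
 */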
struct bpf_stack_map {
	struct bpf_map map;
	u32 n_buckets;
	struct stack_map_bucket __rcu *buckets[];
};

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->map_flags)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8 ||
	    value_size / 8 > PERF_MAX_STACK_DEPTH)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
	if (!smap) {
		smap = vzalloc(cost);
		if (!smap)
			return ERR_PTR(-ENOMEM);
	}

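	/* smap itself only needs room for the bucket pointer array, but
	 * charge the worst case of every bucket holding a full-sized
	 * value when accounting map.pages below.
	 */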
	err = -E2BIG;
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_smap;

	smap->map.map_type = attr->map_type;
	smap->map.key_size = attr->key_size;
	smap->map.value_size = value_size;
	smap->map.max_entries = attr->max_entries;
	smap->n_buckets = n_buckets;
	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

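	/* stack traces are fetched via the perf callchain machinery; hold
	 * a reference on its per-cpu buffers for the map's lifetime
	 * (dropped in stack_map_free()).
	 */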
	err = get_callchain_buffers();
	if (err)
		goto free_smap;

	return &smap->map;

free_smap:
	kvfree(smap);
	return ERR_PTR(err);
}

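/* BPF_FUNC_get_stackid helper: capture the current call chain, hash it,
 * and store it in the bucket selected by the hash. Returns the bucket id
 * (usable later as a lookup key) or a negative error. flags select a
 * user or kernel stack, a count of leading frames to skip, and the
 * collision policy.
 */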
static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / 8;
	/* stack_map_alloc() checks that max_depth <= PERF_MAX_STACK_DEPTH */
	u32 init_nr = PERF_MAX_STACK_DEPTH - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= PERF_MAX_STACK_DEPTH, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = rcu_dereference(smap->buckets[id]);

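	/* Hash hit: with BPF_F_FAST_STACK_CMP trust the hash alone,
	 * otherwise verify the stored trace matches byte for byte.
	 */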
	if (bucket && bucket->hash == hash) {
		if (flags & BPF_F_FAST_STACK_CMP)
			return id;
		if (bucket->nr == trace_nr &&
		    memcmp(bucket->ip, ips, trace_len) == 0)
			return id;
	}

	/* this call stack is not in the map, try to add it */
	if (bucket && !(flags & BPF_F_REUSE_STACKID))
		return -EEXIST;

	new_bucket = kmalloc(sizeof(struct stack_map_bucket) + map->value_size,
			     GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!new_bucket))
		return -ENOMEM;

	memcpy(new_bucket->ip, ips, trace_len);
	memset(new_bucket->ip + trace_len / 8, 0, map->value_size - trace_len);
	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

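	/* Publish the new bucket atomically; the displaced one (if any) is
	 * freed only after an RCU grace period so concurrent lookups never
	 * see freed memory.
	 */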
	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		kfree_rcu(old_bucket, rcu);
	return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func = bpf_get_stackid,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};
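
/* A rough usage sketch (illustrative, not part of this file): a tracing
 * program would call the helper along the lines of
 *
 *	id = bpf_get_stackid(ctx, &stack_map, BPF_F_USER_STACK);
 *
 * and user space would then fetch the u64 ip[] array for that id with
 * bpf_map_lookup_elem() on the map fd. Names here are hypothetical.
 */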

/* Called from syscall or from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket;
	u32 id = *(u32 *)key;

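	/* The key is the bucket id returned by bpf_get_stackid(); the
	 * value handed back is the bucket's raw ip[] array.
	 */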
	if (unlikely(id >= smap->n_buckets))
		return NULL;
	bucket = rcu_dereference(smap->buckets[id]);
	return bucket ? bucket->ip : NULL;
}

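/* Iteration and direct updates are not supported; entries can only be
 * created by bpf_get_stackid() from a program.
 */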
static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EINVAL;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		kfree_rcu(old_bucket, rcu);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	int i;

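	/* wait for in-flight bpf_get_stackid() calls before tearing down
	 * the buckets
	 */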
	synchronize_rcu();

	for (i = 0; i < smap->n_buckets; i++)
		if (smap->buckets[i])
			kfree_rcu(smap->buckets[i], rcu);
	kvfree(smap);
	put_callchain_buffers();
}

static const struct bpf_map_ops stack_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
};

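/* Register this map type so BPF_MAP_TYPE_STACK_TRACE maps can be created
 * via the bpf(2) syscall.
 */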
static struct bpf_map_type_list stack_map_type __read_mostly = {
	.ops = &stack_map_ops,
	.type = BPF_MAP_TYPE_STACK_TRACE,
};

static int __init register_stack_map(void)
{
	bpf_register_map_type(&stack_map_type);
	return 0;
}
late_initcall(register_stack_map);