// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>
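
/* An XSKMAP entry holds a pointer to an AF_XDP socket (struct xdp_sock).
 * The per-cpu flush_list collects sockets that had buffers queued to them
 * via __xsk_map_redirect(), so that __xsk_map_flush() can complete them in
 * one batch. The spinlock serializes element updates and deletions against
 * removal from the socket side.
 */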
struct xsk_map {
	struct bpf_map map;
	struct xdp_sock **xsk_map;
	struct list_head __percpu *flush_list;
	spinlock_t lock; /* Synchronize map updates */
};
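
/* xsk_map_inc()/xsk_map_put() bump and drop the reference count of the
 * underlying bpf_map. Each xsk_map_node holds one such reference, which
 * keeps the map alive as long as a socket is still linked into it.
 */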
int xsk_map_inc(struct xsk_map *map)
{
	struct bpf_map *m = &map->map;

	m = bpf_map_inc(m, false);
	return IS_ERR(m) ? PTR_ERR(m) : 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}
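
/* A node ties one socket to one slot (map_entry) in one map. Nodes are
 * queued on the socket's map_list, so the socket side can find and clear
 * every map entry that still points at it (see xsk_map_try_sock_delete()).
 */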
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}
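
/* Add/remove tracking nodes on the socket's own map_list, protected by
 * xs->map_list_lock rather than the map lock.
 */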
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}
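
/* Map creation: charge the full cost (entry array plus per-cpu flush
 * lists) against the memlock limit up front, then allocate the entry
 * array on the requested NUMA node.
 */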
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct xsk_map *m;
	int cpu, err;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	m = kzalloc(sizeof(*m), GFP_USER);
	if (!m)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&m->map, attr);
	spin_lock_init(&m->lock);

	cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
	cost += sizeof(struct list_head) * num_possible_cpus();

	/* Notice, returns -EPERM if map size is larger than memlock limit */
	err = bpf_map_charge_init(&m->map.memory, cost);
	if (err)
		goto free_m;

	err = -ENOMEM;

	m->flush_list = alloc_percpu(struct list_head);
	if (!m->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));

	m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
					sizeof(struct xdp_sock *),
					m->map.numa_node);
	if (!m->xsk_map)
		goto free_percpu;
	return &m->map;

free_percpu:
	free_percpu(m->flush_list);
free_charge:
	bpf_map_charge_finish(&m->map.memory);
free_m:
	kfree(m);
	return ERR_PTR(err);
}
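
/* Map teardown: bpf_clear_redirect_map() drops any cached redirect state
 * that still refers to this map, and synchronize_net() waits out in-flight
 * datapath users before the entry array is freed.
 */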
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	free_percpu(m->flush_list);
	bpf_map_area_free(m->xsk_map);
	kfree(m);
}
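
/* Key iteration for the syscall interface: a missing or out-of-range key
 * restarts at index 0, and the last index reports -ENOENT.
 */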
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
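
/* Datapath lookup, called from the XDP redirect path under RCU. The
 * READ_ONCE() pairs with the WRITE_ONCE()/xchg() in the update and delete
 * paths so that a torn pointer is never observed.
 */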
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}
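
/* Queue one xdp_buff to the socket's Rx path. On the first successful
 * receive in this cycle (flush_node.prev is NULL) the socket is added to
 * the per-cpu flush list, to be completed later by __xsk_map_flush().
 */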
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}
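
/* Flush every socket queued by __xsk_map_redirect() on this CPU and take
 * it back off the flush list, clearing flush_node.prev so the socket can
 * be re-added on the next cycle.
 */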
void __xsk_map_flush(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
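
/* Lookup from a BPF program; must run under rcu_read_lock(). */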
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}
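
/* Lookups from user space are rejected: the stored value is a kernel
 * socket pointer, so there is nothing meaningful to copy out.
 */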
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}
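
/* Update from the syscall side: the value is the file descriptor of an
 * AF_XDP socket. The fd is resolved to its xdp_sock and a tracking node is
 * allocated; then, under the map lock, the socket is published in the slot
 * and any previous occupant is unlinked from its map_list.
 */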
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	if (!xsk_is_setup_for_bpf_map(xs)) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}
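
/* Delete from the syscall side: atomically clear the slot and, if it was
 * occupied, unlink that socket's tracking node for this slot.
 */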
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}
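
/* Used from the socket side when a socket goes away: clear the given map
 * entry only if it still points at @xs, so that a concurrent map update
 * which installed a different socket is not clobbered.
 */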
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

const struct bpf_map_ops xsk_map_ops = {
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};