// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Facebook
 */
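
/*
 * BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: an array of sockets that the
 * bpf_sk_select_reuseport() helper consumes to pick the receiving
 * socket of a SO_REUSEPORT group.  Entries are added and removed
 * from the syscall side only; BPF programs can only read them.
 */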
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/sock_diag.h>
#include <net/sock_reuseport.h>
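
/*
 * The map header is followed by one RCU-protected socket pointer per
 * slot.  A stored sk also points back at its own slot through
 * sk->sk_user_data (tagged with SK_USER_DATA_BPF), which is how
 * bpf_sk_reuseport_detach() finds and clears the slot on
 * close()/disconnect().
 */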
struct reuseport_array {
	struct bpf_map map;
	struct sock __rcu *ptrs[];
};

static struct reuseport_array *reuseport_array(struct bpf_map *map)
{
	return (struct reuseport_array *)map;
}

/* The caller must hold the reuseport_lock */
void bpf_sk_reuseport_detach(struct sock *sk)
{
	uintptr_t sk_user_data;

	write_lock_bh(&sk->sk_callback_lock);
	sk_user_data = (uintptr_t)sk->sk_user_data;
	if (sk_user_data & SK_USER_DATA_BPF) {
		struct sock __rcu **socks;

		socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
		WRITE_ONCE(sk->sk_user_data, NULL);
		/*
		 * Do not move this NULL assignment outside of
		 * sk->sk_callback_lock because there is
		 * a race with reuseport_array_free()
		 * which does not hold the reuseport_lock.
		 */
		RCU_INIT_POINTER(*socks, NULL);
	}
	write_unlock_bh(&sk->sk_callback_lock);
}
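
/*
 * The value written by user space is a socket fd (u32 or u64); a u64
 * value_size additionally allows reading the sock cookie back through
 * bpf_fd_reuseport_array_lookup_elem().
 */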
static int reuseport_array_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32) &&
	    attr->value_size != sizeof(u64))
		return -EINVAL;

	return array_map_alloc_check(attr);
}
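
/* Caller must be in an RCU read-side critical section */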
static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return rcu_dereference(array->ptrs[index]);
}

/* Called from syscall only */
static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = *(u32 *)key;
	struct sock *sk;
	int err;

	if (index >= map->max_entries)
		return -E2BIG;

	if (!rcu_access_pointer(array->ptrs[index]))
		return -ENOENT;

	spin_lock_bh(&reuseport_lock);

	sk = rcu_dereference_protected(array->ptrs[index],
				       lockdep_is_held(&reuseport_lock));
	if (sk) {
		write_lock_bh(&sk->sk_callback_lock);
		WRITE_ONCE(sk->sk_user_data, NULL);
		RCU_INIT_POINTER(array->ptrs[index], NULL);
		write_unlock_bh(&sk->sk_callback_lock);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&reuseport_lock);

	return err;
}

static void reuseport_array_free(struct bpf_map *map)
{
	struct reuseport_array *array = reuseport_array(map);
	struct sock *sk;
	u32 i;

	/*
	 * ops->map_*_elem() will not be able to access this
	 * array now. Hence, this function only races with
	 * bpf_sk_reuseport_detach() which was triggered by
	 * close() or disconnect().
	 *
	 * This function and bpf_sk_reuseport_detach() are
	 * both removing sk from "array".  Which of them
	 * removes it first does not matter.
	 *
	 * The only concern here is that bpf_sk_reuseport_detach()
	 * may access "array" which is being freed here.
	 * bpf_sk_reuseport_detach() accesses this "array"
	 * through sk->sk_user_data _and_ with sk->sk_callback_lock
	 * held, which is enough because this "array" is not freed
	 * until every sk->sk_user_data has stopped referencing it.
	 *
	 * Hence, due to the above, taking "reuseport_lock" is not
	 * needed here.
	 */

	/*
	 * Since reuseport_lock is not taken, sk is accessed under
	 * rcu_read_lock()
	 */
	rcu_read_lock();
	for (i = 0; i < map->max_entries; i++) {
		sk = rcu_dereference(array->ptrs[i]);
		if (sk) {
			write_lock_bh(&sk->sk_callback_lock);
			/*
			 * No need for WRITE_ONCE(). At this point,
			 * no one is reading it without taking the
			 * sk->sk_callback_lock.
			 */
			sk->sk_user_data = NULL;
			write_unlock_bh(&sk->sk_callback_lock);
			RCU_INIT_POINTER(array->ptrs[i], NULL);
		}
	}
	rcu_read_unlock();

	/*
	 * Once we reach here, no sk->sk_user_data is referencing
	 * this "array" any more.  "array" can be freed now.
	 */
	bpf_map_area_free(array);
}
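
/* Called from syscall only */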
static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct reuseport_array *array;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);

	return &array->map;
}
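
/*
 * Called from syscall only.  A kernel socket pointer cannot be
 * returned to user space, so the sock cookie is reported as the value
 * instead (hence the u64 value_size requirement below).
 */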
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct sock *sk;
	int err;

	if (map->value_size != sizeof(u64))
		return -ENOSPC;

	rcu_read_lock();
	sk = reuseport_array_lookup_elem(map, key);
	if (sk) {
		*(u64 *)value = __sock_gen_cookie(sk);
		err = 0;
	} else {
		err = -ENOENT;
	}
	rcu_read_unlock();

	return err;
}
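
/*
 * Called twice by bpf_fd_reuseport_array_update_elem(): once as a
 * lockless quick check, and again with reuseport_lock and
 * nsk->sk_callback_lock held to make the result stable.
 */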
static int
reuseport_array_update_check(const struct reuseport_array *array,
			     const struct sock *nsk,
			     const struct sock *osk,
			     const struct sock_reuseport *nsk_reuse,
			     u32 map_flags)
{
	if (osk && map_flags == BPF_NOEXIST)
		return -EEXIST;

	if (!osk && map_flags == BPF_EXIST)
		return -ENOENT;

	if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
		return -ENOTSUPP;

	if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
		return -ENOTSUPP;

	if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
		return -ENOTSUPP;

	/*
	 * sk must be hashed (i.e. listening in the TCP case or bound
	 * in the UDP case) and
	 * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).
	 *
	 * Also, sk will be used by a bpf helper that is protected by
	 * rcu_read_lock().
	 */
	if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
		return -EINVAL;

	/* READ_ONCE because the sk->sk_callback_lock may not be held here */
	if (READ_ONCE(nsk->sk_user_data))
		return -EBUSY;

	return 0;
}

/*
 * Called from syscall only.
 * The "nsk" is pinned by the fd's refcnt.
 * The "osk" and "reuse" are protected by reuseport_lock.
 */
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	struct reuseport_array *array = reuseport_array(map);
	struct sock *free_osk = NULL, *osk, *nsk;
	struct sock_reuseport *reuse;
	u32 index = *(u32 *)key;
	uintptr_t sk_user_data;
	struct socket *socket;
	int err, fd;

	if (map_flags > BPF_EXIST)
		return -EINVAL;

	if (index >= map->max_entries)
		return -E2BIG;
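
	/* The fd may be passed as a u32 or a u64, but it must fit in an int */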
	if (map->value_size == sizeof(u64)) {
		u64 fd64 = *(u64 *)value;

		if (fd64 > S32_MAX)
			return -EINVAL;
		fd = fd64;
	} else {
		fd = *(int *)value;
	}

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	nsk = socket->sk;
	if (!nsk) {
		err = -EINVAL;
		goto put_file;
	}

	/* Quick checks before taking reuseport_lock */
	err = reuseport_array_update_check(array, nsk,
					   rcu_access_pointer(array->ptrs[index]),
					   rcu_access_pointer(nsk->sk_reuseport_cb),
					   map_flags);
	if (err)
		goto put_file;

	spin_lock_bh(&reuseport_lock);
	/*
	 * Some of the checks only need the reuseport_lock, but they
	 * are done under the sk_callback_lock as well, for simplicity.
	 */
	write_lock_bh(&nsk->sk_callback_lock);

	osk = rcu_dereference_protected(array->ptrs[index],
					lockdep_is_held(&reuseport_lock));
	reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
	if (err)
		goto put_file_unlock;
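
	/*
	 * Point sk_user_data back at this slot so that
	 * bpf_sk_reuseport_detach() can find and clear it; the low
	 * bits of the pointer carry the SK_USER_DATA_* flags.
	 */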
	sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
		SK_USER_DATA_BPF;
	WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
	rcu_assign_pointer(array->ptrs[index], nsk);
	free_osk = osk;
	err = 0;

put_file_unlock:
	write_unlock_bh(&nsk->sk_callback_lock);

	if (free_osk) {
		write_lock_bh(&free_osk->sk_callback_lock);
		WRITE_ONCE(free_osk->sk_user_data, NULL);
		write_unlock_bh(&free_osk->sk_callback_lock);
	}

	spin_unlock_bh(&reuseport_lock);
put_file:
	fput(socket->file);
	return err;
}

/* Called from syscall */
static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

static int reuseport_array_map_btf_id;
const struct bpf_map_ops reuseport_array_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = reuseport_array_alloc_check,
	.map_alloc = reuseport_array_alloc,
	.map_free = reuseport_array_free,
	.map_lookup_elem = reuseport_array_lookup_elem,
	.map_get_next_key = reuseport_array_get_next_key,
	.map_delete_elem = reuseport_array_delete_elem,
	.map_btf_name = "reuseport_array",
	.map_btf_id = &reuseport_array_map_btf_id,
};