Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Craig Gallek | ef45614 | 2016-01-04 17:41:45 -0500 | [diff] [blame] | 2 | #ifndef _SOCK_REUSEPORT_H |
| 3 | #define _SOCK_REUSEPORT_H |
| 4 | |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 5 | #include <linux/filter.h> |
| 6 | #include <linux/skbuff.h> |
Craig Gallek | ef45614 | 2016-01-04 17:41:45 -0500 | [diff] [blame] | 7 | #include <linux/types.h> |
Martin KaFai Lau | 736b460 | 2018-08-08 01:01:22 -0700 | [diff] [blame] | 8 | #include <linux/spinlock.h> |
Craig Gallek | ef45614 | 2016-01-04 17:41:45 -0500 | [diff] [blame] | 9 | #include <net/sock.h> |
| 10 | |
Martin KaFai Lau | 736b460 | 2018-08-08 01:01:22 -0700 | [diff] [blame] | 11 | extern spinlock_t reuseport_lock; |
| 12 | |
/* Per-group state shared by every socket in one SO_REUSEPORT group.
 * Reached from a member socket via rcu_dereference(sk->sk_reuseport_cb)
 * (see reuseport_has_conns() below); freed via the embedded rcu head.
 */
struct sock_reuseport {
	struct rcu_head		rcu;

	u16			max_socks;	/* length of socks */
	u16			num_socks;	/* elements in socks */
	/* The last synq overflow event timestamp of this
	 * reuse->socks[] group.
	 */
	unsigned int		synq_overflow_ts;
	/* ID stays the same even after the size of socks[] grows. */
	unsigned int		reuseport_id;
	/* Recorded when the group is created/extended — supplied by the
	 * bind_inany argument of reuseport_alloc()/reuseport_add_sock().
	 */
	unsigned int		bind_inany:1;
	/* Set once via reuseport_has_conns(sk, true); presumably means a
	 * member socket has established a connection — confirm in callers.
	 */
	unsigned int		has_conns:1;
	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
	struct sock		*socks[];	/* flexible array of member sock pointers */
};
| 29 | |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 30 | extern int reuseport_alloc(struct sock *sk, bool bind_inany); |
| 31 | extern int reuseport_add_sock(struct sock *sk, struct sock *sk2, |
| 32 | bool bind_inany); |
Craig Gallek | ef45614 | 2016-01-04 17:41:45 -0500 | [diff] [blame] | 33 | extern void reuseport_detach_sock(struct sock *sk); |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 34 | extern struct sock *reuseport_select_sock(struct sock *sk, |
| 35 | u32 hash, |
| 36 | struct sk_buff *skb, |
| 37 | int hdr_len); |
Martin KaFai Lau | 8217ca6 | 2018-08-08 01:01:26 -0700 | [diff] [blame] | 38 | extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); |
Martin KaFai Lau | 99f3a06 | 2019-06-13 15:00:01 -0700 | [diff] [blame] | 39 | extern int reuseport_detach_prog(struct sock *sk); |
| 40 | |
Willem de Bruijn | acdcecc | 2019-09-12 21:16:39 -0400 | [diff] [blame] | 41 | static inline bool reuseport_has_conns(struct sock *sk, bool set) |
| 42 | { |
| 43 | struct sock_reuseport *reuse; |
| 44 | bool ret = false; |
| 45 | |
| 46 | rcu_read_lock(); |
| 47 | reuse = rcu_dereference(sk->sk_reuseport_cb); |
| 48 | if (reuse) { |
| 49 | if (set) |
| 50 | reuse->has_conns = 1; |
| 51 | ret = reuse->has_conns; |
| 52 | } |
| 53 | rcu_read_unlock(); |
| 54 | |
| 55 | return ret; |
| 56 | } |
| 57 | |
Craig Gallek | ef45614 | 2016-01-04 17:41:45 -0500 | [diff] [blame] | 58 | #endif /* _SOCK_REUSEPORT_H */ |