/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	unsigned long			copy;
	/* The extra two elements:
	 * 1) used for chaining the front and end sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs
	 *    require the chaining;
	 * 2) to chain trailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};
static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*skb_parser;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_parser {
	struct strparser		strp;
	bool				enabled;
	void (*saved_data_ready)(struct sock *sk);
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
	struct sk_psock_parser		parser;
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	struct proto			*sk_proto;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	union {
		struct rcu_head		rcu;
		struct work_struct	gc;
	};
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

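/* Consume @bytes from psock->apply_bytes, clamping the budget at zero. */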
static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

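/* Number of ring slots between @start and @end, accounting for wrap-around
 * in the NR_MSG_FRAG_IDS-sized scatterlist ring.
 */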
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

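/* Step a ring index backwards/forwards, wrapping within NR_MSG_FRAG_IDS. */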
#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

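/* Reset the scatterlist bookkeeping (start/curr/end/size/copybreak/copy)
 * without touching the sg entries themselves.
 */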
static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

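/* Move @size bytes of entry @which from @src to @dst: @dst takes a copy of
 * the entry trimmed to @size while @src's entry is advanced past the moved
 * bytes; both totals are adjusted accordingly.
 */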
static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

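/* Point msg->data/data_end at the start sg entry's data, or clear them when
 * that entry's copy bit is set.
 */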
static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, &msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

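/* Append a page fragment at the ring's end slot: take a page reference,
 * set the new entry's copy bit, grow sg.size and advance sg.end.
 */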
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, &msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

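/* Walk the ring from @i and set or clear the copy bit on each entry until
 * sg.end is reached.
 */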
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, &msg->sg.copy);
		else
			__clear_bit(i, &msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

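/* Append @msg to the psock's ingress message list. */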
static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	list_add_tail(&msg->list, &psock->ingress_msg);
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

void __sk_psock_purge_ingress_msg(struct sk_psock *psock);

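/* Free a pending cork message, if any, and clear psock->cork. */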
static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_update_proto(struct sock *sk,
					 struct sk_psock *psock,
					 struct proto *ops)
{
	/* Pairs with lockless read in sk_clone_lock() */
	WRITE_ONCE(sk->sk_prot, ops);
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (inet_csk_has_ulp(sk)) {
		/* TLS does not have an unhash proto in SW cases, but we need
		 * to ensure we stop using the sock_map unhash routine because
		 * the associated psock is being removed. So use the original
		 * unhash handler.
		 */
		WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
		tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
	} else {
		sk->sk_write_space = psock->saved_write_space;
		/* Pairs with lockless read in sk_clone_lock() */
		WRITE_ONCE(sk->sk_prot, psock->sk_proto);
	}
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

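/* Look up the socket's psock under RCU and take a reference, or return NULL
 * if there is none or its refcount has already dropped to zero.
 */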
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

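/* Deliver data-ready through the strparser's saved callback when parsing is
 * enabled, otherwise through the socket's normal sk_data_ready.
 */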
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->parser.enabled)
		psock->parser.saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

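/* Install @prog (may be NULL) and release the reference on whatever program
 * it replaces.
 */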
static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

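/* Atomically swap in @prog only if the current program is @old; returns
 * -ENOENT when the current program is not @old.
 */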
static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->skb_parser, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return psock->parser.enabled;
}
#endif /* _LINUX_SKMSG_H */