// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

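/* Local ingress redirect: charge @sk for up to @apply_bytes of @msg, move the
 * corresponding scatterlist entries into a freshly allocated sk_msg, queue it
 * on @psock's ingress list and wake up any reader waiting on the socket.
 */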
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

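/* Transmit up to @apply_bytes of @msg on @sk. When a TLS TX context is active
 * the data is sent via kernel_sendpage_locked() with MSG_SENDPAGE_NOPOLICY so
 * the TLS layer does not run the BPF policy on it again; otherwise
 * do_tcp_sendpages() is used. The caller must hold the socket lock.
 */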
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp) {
			flags |= MSG_SENDPAGE_NOPOLICY;
			ret = kernel_sendpage_locked(sk,
						     page, off, size, flags);
		} else {
			ret = do_tcp_sendpages(sk, page, off, size, flags);
		}

		if (ret <= 0)
			return ret;
		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

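/* Entry point for SK_MSG redirects: depending on the BPF_F_INGRESS flag
 * carried in @msg, either queue the data on the target socket's ingress list
 * or push it out on the wire.
 */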
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
			  u32 bytes, int flags)
{
	bool ingress = sk_msg_to_ingress(msg);
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock)) {
		sk_msg_free(sk, msg);
		return 0;
	}
	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
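/* ->stream_memory_read() replacement: the socket is also readable when the
 * psock ingress list is non-empty.
 */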
static bool tcp_bpf_stream_read(const struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}

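/* ->recvmsg() replacement installed while a psock is attached: consume data
 * queued on the psock ingress list first, fall back to tcp_recvmsg() when the
 * psock list is empty but the regular receive queue is not, and otherwise wait
 * for data within the socket's receive timeout.
 */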
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		int data, err = 0;
		long timeo;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		}
		if (err) {
			ret = err;
			goto out;
		}
		copied = -EAGAIN;
	}
	ret = copied;
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

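/* Run the attached SK_MSG verdict program on @msg (once per apply_bytes
 * window) and act on the result: __SK_PASS sends the data on @sk,
 * __SK_REDIRECT hands it off to the socket selected by the program, __SK_DROP
 * frees it and returns -EACCES. If the program asked to cork more bytes than
 * are available, @msg is stashed on the psock until enough data has
 * accumulated.
 */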
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg);
	struct sock *sk_redir;
	u32 tosend, delta = 0;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track delta in msg size to add/subtract it on SK_DROP from
		 * returned to user copied size. This ensures user doesn't
		 * get a positive return code with msg_cut_data and SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);
		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length)
			goto more_data;
	}
	return ret;
}

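/* ->sendmsg() replacement: copy user data into an sk_msg (the psock cork
 * buffer when corking is in progress) and pass each chunk to
 * tcp_bpf_send_verdict().
 */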
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal do_tcp_sendpages() flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

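/* ->sendpage() replacement: add the page to an sk_msg (or the cork buffer)
 * and run it through the verdict path.
 */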
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

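/* Replacement proto tables, one per address family (v4/v6) and configuration
 * (base vs. tx, i.e. with or without a msg_parser program attached).
 */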
enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

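/* Build the BPF proto variants from @base: TCP_BPF_BASE overrides unhash,
 * close, recvmsg and stream_memory_read; TCP_BPF_TX additionally overrides
 * sendmsg and sendpage.
 */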
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE]			= *base;
	prot[TCP_BPF_BASE].unhash		= sock_map_unhash;
	prot[TCP_BPF_BASE].close		= sock_map_close;
	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].stream_memory_read	= tcp_bpf_stream_read;

	prot[TCP_BPF_TX]			= prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg		= tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage		= tcp_bpf_sendpage;
}

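/* tcpv6_prot is provided by the (possibly modular) IPv6 code, so the IPv6
 * variants are built lazily the first time an IPv6 socket shows up, and
 * rebuilt if the proto ops have changed since.
 */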
static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
core_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

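/* Switch @sk to the matching tcp_bpf proto variant when a psock is attached,
 * or back to its original proto ops (and ULP, if any) when @restore is true.
 */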
int tcp_bpf_update_proto(struct sock *sk, bool restore)
{
	struct sk_psock *psock = sk_psock(sk);
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			WRITE_ONCE(sk->sk_prot, psock->sk_proto);
		}
		return 0;
	}

	if (inet_csk_has_ulp(sk))
		return -EINVAL;

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	WRITE_ONCE(sk->sk_prot, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	struct proto *prot = newsk->sk_prot;

	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */