// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

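/* The sk_msg scatterlist is used as a ring: sg.end > sg.start means the
 * used region is contiguous, otherwise it wraps around the end of the
 * array. Coalescing into the tail element is only OK while the element
 * the current operation started from (elem_first_coalesce) still lies
 * inside the used region.
 */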
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

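/* Charge and map page-frag memory so that @msg holds @len bytes in
 * total, coalescing with the tail element when the page frag directly
 * continues it. Returns 0 on success, -ENOMEM when the socket cannot
 * charge more memory, or -ENOSPC when the scatterlist ring is full.
 */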
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

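/* Share @len bytes starting at @off of @src with @dst without copying:
 * page references are taken via sk_msg_page_add(), or the tail element
 * of @dst is extended when the new bytes are virtually contiguous with
 * it.
 */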
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

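/* Return (uncharge) @bytes of send memory to the socket.
 * sk_msg_return_zero() also zeroes the consumed elements and advances
 * sg.start, whereas sk_msg_return() only adjusts the memory accounting.
 */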
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

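/* Free one scatterlist element. When the sk_msg wraps an ingress skb
 * the pages belong to the skb and are released from the consume_skb()
 * path instead, so only msgs that own their pages uncharge and
 * put_page() here.
 */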
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

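/* Trim @msg from the tail down to @len bytes, freeing fully trimmed
 * elements and fixing up sg.end, sg.curr and sg.copybreak.
 */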
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

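/* Pin user pages from @from and map up to @bytes of them straight into
 * @msg (the zerocopy send path). On error the iterator is reverted and
 * the caller is expected to trim any partially mapped data off @msg.
 */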
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

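/* Copy up to @bytes from @from into memory already allocated to @msg,
 * resuming at the sg.curr element and sg.copybreak offset left behind
 * by a previous copy.
 */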
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

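/* Allocate an sk_msg for an skb being redirected to @sk's ingress
 * queue, enforcing the receive buffer limit and reserving receive
 * memory for the skb's truesize first.
 */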
static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

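/* Map @len bytes of @skb starting at @off into @msg's scatterlist,
 * queue the msg on the psock ingress list and wake up the receiver.
 */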
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec cannot fail.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition seeing it already set
	 * correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it's been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

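/* Egress skbs are written out through the psock's own socket; ingress
 * skbs are moved onto its ingress message queue instead.
 */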
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb, off, len);
}

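/* Park a partially sent skb in the psock work state so the backlog
 * worker can resume it later, or drop it if TX has been disabled.
 */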
static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

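/* Workqueue handler: drain psock->ingress_skb, first resuming any skb
 * parked in work_state. -EAGAIN re-parks the skb for a later retry;
 * any other error breaks the pipe and disables TX.
 */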
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the freed skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock, false);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

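/* Map the BPF verdict (SK_PASS/SK_DROP) together with the redirect
 * flag onto the internal __SK_PASS/__SK_REDIRECT/__SK_DROP actions.
 */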
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

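/* Run the attached msg_parser BPF program on @msg and, on redirect,
 * resolve the target socket and hold a reference on it for the caller.
 */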
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

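/* Apply a verdict on the non-TLS receive path: __SK_PASS queues the
 * skb to our own ingress (falling back to the backlog worker on
 * failure), __SK_REDIRECT hands it to the target psock, and anything
 * else drops it.
 */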
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			skb_bpf_redirect_clear(skb);
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. And if
		 * sk_psock_skb_ingress errors, that will be handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		if (ret == SK_PASS)
			skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}