// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

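/* Grow @msg so that it holds @len bytes in total, carving the new space out
 * of the socket's page_frag allocator. New space is either coalesced into
 * the last scatterlist element (when it ends exactly where the fragment's
 * free space begins and coalescing past @elem_first_coalesce is allowed) or
 * placed in a fresh element. Returns 0 on success, -ENOMEM if page or memory
 * accounting fails, or -ENOSPC once the ring of MAX_MSG_FRAGS is full.
 */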
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

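/* Uncharge @bytes of send memory from @sk and consume the corresponding
 * scatterlist elements from the front of @msg, zeroing each element that is
 * fully returned and advancing msg->sg.start past it.
 */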
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

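/* Like sk_msg_return_zero() but only adjusts the socket's memory accounting;
 * the scatterlist elements themselves are left untouched.
 */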
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	if (charge)
		sk_mem_uncharge(sk, len);
	if (!msg->skb)
		put_page(sg_page(sge));
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	if (msg->skb)
		consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

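/* Trim @msg down to @len bytes, releasing whole scatterlist elements from the
 * tail and shrinking the last partially trimmed element. The copy cursor
 * (sg.curr/sg.copybreak) is pulled back when it points past the new end so
 * that subsequent copies land in the right place.
 */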
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
out:
	/* If we trim data before the curr pointer, update copybreak and
	 * curr so that any future copy operations start at the new copy
	 * location. However, trimmed data that has not yet been used in a
	 * copy op does not require an update.
	 */
	if (msg->sg.curr >= i) {
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
	sk_msg_iter_var_next(i);
	msg->sg.end = i;
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

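/* Pin user pages from @from and link them into @msg without copying the data
 * (zerocopy). Fails with -EFAULT if the element ring fills up or the pages
 * cannot be pinned; on failure the iterator is reverted so the caller can
 * retry or fall back to a copying path.
 */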
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and
		 * prefer the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates on error; msg will need to use 'trim'
	 * later if it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

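/* Copy up to @bytes from @from into memory already attached to @msg, starting
 * at the current copy position (sg.curr/sg.copybreak). Returns -EFAULT on a
 * short copy, -ENOSPC if no writable element is available, otherwise the
 * result of the last copy_from_iter*() call.
 */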
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

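/* Queue @skb on the psock's ingress message list so a local receiver can
 * consume it: the skb's data is mapped into a freshly allocated sk_msg,
 * receive memory is charged to the socket and sk_data_ready() is invoked.
 * Returns the number of bytes queued, -EAGAIN on allocation or accounting
 * failure, or the skb_to_sgvec() error.
 */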
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	int copied = 0, num_sge;
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!msg))
		return -EAGAIN;
	if (!sk_rmem_schedule(sk, skb, skb->len)) {
		kfree(msg);
		return -EAGAIN;
	}

	sk_msg_init(msg);
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	sk_mem_charge(sk, skb->len);
	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk->sk_data_ready(sk);
	return copied;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (ingress)
		return sk_psock_skb_ingress(psock, skb);
	else
		return skb_send_sock_locked(psock->sk, skb, off, len);
}

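/* Work callback that drains psock->ingress_skb: ingress skbs are turned into
 * sk_msgs via sk_psock_skb_ingress(), egress skbs are sent with
 * skb_send_sock_locked(). On -EAGAIN the current skb, length and offset are
 * stashed in work_state so the next run can resume where it left off; any
 * other error reports the error (or EPIPE) and disables further transmit.
 */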
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	/* Lock sock to avoid losing sk_socket during loop. */
	lock_sock(psock->sk);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = tcp_skb_bpf_ingress(skb);
		do {
			ret = -EIO;
			if (likely(psock->sk->sk_socket))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	release_sock(psock->sk);
}

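/* Allocate and attach a psock to @sk. The new psock starts with
 * SK_PSOCK_TX_ENABLED set, a reference on @sk and a refcount of one, and is
 * published through sk_user_data for RCU readers.
 *
 * A typical attach sequence, sketched for a hypothetical caller such as the
 * sockmap code (program attach and error unwinding elided):
 *
 *	psock = sk_psock_init(sk, NUMA_NO_NODE);
 *	if (!psock)
 *		return -ENOMEM;
 *	if (sk_psock_init_strp(sk, psock))
 *		goto err;	// hypothetical unwind label
 *	sk_psock_start_strp(sk, psock);
 */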
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock = kzalloc_node(sizeof(*psock),
					      GFP_ATOMIC | __GFP_NOWARN,
					      node);
	if (!psock)
		return NULL;

	psock->sk = sk;
	psock->eval = __SK_NONE;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_LIST_HEAD(&psock->ingress_msg);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data(sk, psock);
	sock_hold(sk);

	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
	__skb_queue_purge(&psock->ingress_skb);
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

	/* No sk_callback_lock since already detached. */
	if (psock->parser.enabled)
		strp_done(&psock->parser.strp);

	cancel_work_sync(&psock->work);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_destroy(struct rcu_head *rcu)
{
	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
	schedule_work(&psock->gc);
}
EXPORT_SYMBOL_GPL(sk_psock_destroy);

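/* Detach the psock from @sk: clear sk_user_data, restore the socket's
 * original proto callbacks, stop the strparser if one was attached and
 * schedule the actual teardown (sk_psock_destroy) after an RCU grace period.
 */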
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	rcu_assign_sk_user_data(sk, NULL);
	sk_psock_cork_free(psock);
	sk_psock_restore_proto(sk, psock);

	write_lock_bh(&sk->sk_callback_lock);
	if (psock->progs.skb_parser)
		sk_psock_stop_strp(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

	call_rcu_sched(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

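/* Run the attached BPF msg_parser program on @msg and map its return code to
 * an internal verdict (__SK_PASS/__SK_REDIRECT/__SK_DROP). On a redirect
 * verdict a reference to the target socket chosen by the program is stashed
 * in psock->sk_redir. Without a program the message simply passes.
 */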
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	preempt_disable();
	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = BPF_PROG_RUN(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
			    struct sk_buff *skb)
{
	int ret;

	skb->sk = psock->sk;
	bpf_compute_data_end_sk_skb(skb);
	preempt_disable();
	ret = BPF_PROG_RUN(prog, skb);
	preempt_enable();
	/* strparser clones the skb before handing it to an upper layer,
	 * meaning skb_orphan() has been called. We NULL sk on the way out
	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
	 * later and because we are not charging the memory of this skb
	 * to any socket yet.
	 */
	skb->sk = NULL;
	return ret;
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
	struct sk_psock_parser *parser;

	parser = container_of(strp, struct sk_psock_parser, strp);
	return container_of(parser, struct sk_psock, parser);
}

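/* Apply the skb_verdict result: on __SK_REDIRECT the skb is queued on the
 * target psock's ingress_skb list and its backlog work is scheduled, subject
 * to the target socket being alive, TX enabled and within its send/receive
 * buffer limits; anything else frees the skb.
 */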
static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;
	bool ingress;

	switch (verdict) {
	case __SK_REDIRECT:
		sk_other = tcp_skb_bpf_redirect_fetch(skb);
		if (unlikely(!sk_other))
			goto out_free;
		psock_other = sk_psock(sk_other);
		if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED))
			goto out_free;
		ingress = tcp_skb_bpf_ingress(skb);
		if ((!ingress && sock_writeable(sk_other)) ||
		    (ingress &&
		     atomic_read(&sk_other->sk_rmem_alloc) <=
		     sk_other->sk_rcvbuf)) {
			if (!ingress)
				skb_set_owner_w(skb, sk_other);
			skb_queue_tail(&psock_other->ingress_skb, skb);
			schedule_work(&psock_other->work);
			break;
		}
		/* fall-through */
	case __SK_DROP:
		/* fall-through */
	default:
out_free:
		kfree_skb(skb);
	}
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = __SK_DROP;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_orphan(skb);
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
	}
	rcu_read_unlock();
	sk_psock_verdict_apply(psock, skb, ret);
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_parser);
	if (likely(prog))
		ret = sk_psock_bpf_run(psock, prog, skb);
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->parser.strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	/* Only dereference the psock while it is still attached. */
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

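/* strparser glue: sk_psock_init_strp() registers the parse/verdict callbacks
 * above, sk_psock_start_strp() swaps in sk_psock_data_ready() and
 * sk_psock_write_space() as the socket's callbacks, and sk_psock_stop_strp()
 * restores sk_data_ready and stops the parser.
 */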
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	psock->parser.enabled = false;
	return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	strp_stop(&parser->strp);
	parser->enabled = false;
}