/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached, the sock object may only be used for
 * sock redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs, the update will return an EBUSY error.
 *
 * For reference, this map type is similar to devmap used in the XDP context,
 * so reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/; a condensed sketch also follows below.
 */
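/* Example usage (a sketch, not code from this file): a typical user-space
 * setup creates the map, attaches the parser and verdict programs to it, and
 * then adds established TCP sockets by fd. The calls below are the libbpf
 * wrappers of this era; parse_fd, verdict_fd, key and tcp_fd are placeholders
 * for objects created elsewhere.
 *
 *	map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				sizeof(int), sizeof(int), 1024, 0);
 *	bpf_prog_attach(parse_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *	bpf_map_update_elem(map_fd, &key, &tcp_fd, BPF_ANY);
 *
 * The verdict program can then redirect an skb to any socket in the map with
 * bpf_sk_redirect_map(skb, &sock_map, key, 0) and return SK_PASS.
 */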
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <linux/ptr_ring.h>
#include <net/inet_common.h>

#define SOCK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_tx_msg;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
};

struct smap_psock {
	struct rcu_head rcu;
	refcount_t refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	/* datapath variables for tx_msg ULP */
	struct sock *sk_redir;
	int apply_bytes;
	int cork_bytes;
	int sg_size;
	int eval;
	struct sk_msg_buff *cork;
	struct list_head ingress;

	struct strparser strp;
	struct bpf_prog *bpf_tx_msg;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	struct proto *sk_proto;
	void (*save_close)(struct sock *sk, long timeout);
	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
};

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len);
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
			    int offset, size_t size, int flags);

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}
119
John Fastabend8934ce22018-03-28 12:49:15 -0700120static bool bpf_tcp_stream_read(const struct sock *sk)
121{
122 struct smap_psock *psock;
123 bool empty = true;
124
125 rcu_read_lock();
126 psock = smap_psock_sk(sk);
127 if (unlikely(!psock))
128 goto out;
129 empty = list_empty(&psock->ingress);
130out:
131 rcu_read_unlock();
132 return !empty;
133}
134
John Fastabend1aa12bd2018-02-05 10:17:49 -0800135static struct proto tcp_bpf_proto;
136static int bpf_tcp_init(struct sock *sk)
137{
138 struct smap_psock *psock;
139
140 rcu_read_lock();
141 psock = smap_psock_sk(sk);
142 if (unlikely(!psock)) {
143 rcu_read_unlock();
144 return -EINVAL;
145 }
146
147 if (unlikely(psock->sk_proto)) {
148 rcu_read_unlock();
149 return -EBUSY;
150 }
151
152 psock->save_close = sk->sk_prot->close;
153 psock->sk_proto = sk->sk_prot;
John Fastabend4f738ad2018-03-18 12:57:10 -0700154
155 if (psock->bpf_tx_msg) {
156 tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
157 tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
John Fastabend8934ce22018-03-28 12:49:15 -0700158 tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
159 tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
John Fastabend4f738ad2018-03-18 12:57:10 -0700160 }
161
John Fastabend1aa12bd2018-02-05 10:17:49 -0800162 sk->sk_prot = &tcp_bpf_proto;
163 rcu_read_unlock();
164 return 0;
165}
166
John Fastabend4f738ad2018-03-18 12:57:10 -0700167static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
168static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
169
John Fastabend1aa12bd2018-02-05 10:17:49 -0800170static void bpf_tcp_release(struct sock *sk)
171{
172 struct smap_psock *psock;
173
174 rcu_read_lock();
175 psock = smap_psock_sk(sk);
John Fastabend4f738ad2018-03-18 12:57:10 -0700176 if (unlikely(!psock))
177 goto out;
John Fastabend1aa12bd2018-02-05 10:17:49 -0800178
John Fastabend4f738ad2018-03-18 12:57:10 -0700179 if (psock->cork) {
180 free_start_sg(psock->sock, psock->cork);
181 kfree(psock->cork);
182 psock->cork = NULL;
John Fastabend1aa12bd2018-02-05 10:17:49 -0800183 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700184
John Fastabend0e94d872018-04-02 12:50:52 -0700185 if (psock->sk_proto) {
186 sk->sk_prot = psock->sk_proto;
187 psock->sk_proto = NULL;
188 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700189out:
John Fastabend1aa12bd2018-02-05 10:17:49 -0800190 rcu_read_unlock();
191}
192
John Fastabend1aa12bd2018-02-05 10:17:49 -0800193static void bpf_tcp_close(struct sock *sk, long timeout)
194{
195 void (*close_fun)(struct sock *sk, long timeout);
196 struct smap_psock_map_entry *e, *tmp;
John Fastabend8934ce22018-03-28 12:49:15 -0700197 struct sk_msg_buff *md, *mtmp;
John Fastabend1aa12bd2018-02-05 10:17:49 -0800198 struct smap_psock *psock;
199 struct sock *osk;
200
201 rcu_read_lock();
202 psock = smap_psock_sk(sk);
203 if (unlikely(!psock)) {
204 rcu_read_unlock();
205 return sk->sk_prot->close(sk, timeout);
206 }
207
	/* The psock may be destroyed any time after exiting the RCU critical
	 * section, so by the time we use close_fun the psock may no longer
	 * be valid. However, bpf_tcp_close is called with the sock lock
	 * held, so the close hook and sk are still valid.
	 */
213 close_fun = psock->save_close;
214
215 write_lock_bh(&sk->sk_callback_lock);
John Fastabend820ed3f2018-04-02 12:50:46 -0700216 if (psock->cork) {
217 free_start_sg(psock->sock, psock->cork);
218 kfree(psock->cork);
219 psock->cork = NULL;
220 }
221
John Fastabend8934ce22018-03-28 12:49:15 -0700222 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
223 list_del(&md->list);
224 free_start_sg(psock->sock, md);
225 kfree(md);
226 }
227
John Fastabend1aa12bd2018-02-05 10:17:49 -0800228 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
229 osk = cmpxchg(e->entry, sk, NULL);
230 if (osk == sk) {
231 list_del(&e->list);
232 smap_release_sock(psock, sk);
233 }
234 }
235 write_unlock_bh(&sk->sk_callback_lock);
236 rcu_read_unlock();
237 close_fun(sk, timeout);
238}
239
John Fastabend04686ef2017-10-31 19:17:31 -0700240enum __sk_action {
241 __SK_DROP = 0,
242 __SK_PASS,
243 __SK_REDIRECT,
John Fastabend4f738ad2018-03-18 12:57:10 -0700244 __SK_NONE,
John Fastabend04686ef2017-10-31 19:17:31 -0700245};
246
John Fastabend1aa12bd2018-02-05 10:17:49 -0800247static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
248 .name = "bpf_tcp",
249 .uid = TCP_ULP_BPF,
250 .user_visible = false,
251 .owner = NULL,
252 .init = bpf_tcp_init,
253 .release = bpf_tcp_release,
254};
255
John Fastabend4f738ad2018-03-18 12:57:10 -0700256static int memcopy_from_iter(struct sock *sk,
257 struct sk_msg_buff *md,
258 struct iov_iter *from, int bytes)
259{
260 struct scatterlist *sg = md->sg_data;
261 int i = md->sg_curr, rc = -ENOSPC;
262
263 do {
264 int copy;
265 char *to;
266
267 if (md->sg_copybreak >= sg[i].length) {
268 md->sg_copybreak = 0;
269
270 if (++i == MAX_SKB_FRAGS)
271 i = 0;
272
273 if (i == md->sg_end)
274 break;
275 }
276
277 copy = sg[i].length - md->sg_copybreak;
278 to = sg_virt(&sg[i]) + md->sg_copybreak;
279 md->sg_copybreak += copy;
280
281 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
282 rc = copy_from_iter_nocache(to, copy, from);
283 else
284 rc = copy_from_iter(to, copy, from);
285
286 if (rc != copy) {
287 rc = -EFAULT;
288 goto out;
289 }
290
291 bytes -= copy;
292 if (!bytes)
293 break;
294
295 md->sg_copybreak = 0;
296 if (++i == MAX_SKB_FRAGS)
297 i = 0;
298 } while (i != md->sg_end);
299out:
300 md->sg_curr = i;
301 return rc;
302}
303
304static int bpf_tcp_push(struct sock *sk, int apply_bytes,
305 struct sk_msg_buff *md,
306 int flags, bool uncharge)
307{
308 bool apply = apply_bytes;
309 struct scatterlist *sg;
310 int offset, ret = 0;
311 struct page *p;
312 size_t size;
313
314 while (1) {
315 sg = md->sg_data + md->sg_start;
316 size = (apply && apply_bytes < sg->length) ?
317 apply_bytes : sg->length;
318 offset = sg->offset;
319
320 tcp_rate_check_app_limited(sk);
321 p = sg_page(sg);
322retry:
323 ret = do_tcp_sendpages(sk, p, offset, size, flags);
324 if (ret != size) {
325 if (ret > 0) {
326 if (apply)
327 apply_bytes -= ret;
328 size -= ret;
329 offset += ret;
330 if (uncharge)
331 sk_mem_uncharge(sk, ret);
332 goto retry;
333 }
334
335 sg->length = size;
336 sg->offset = offset;
337 return ret;
338 }
339
340 if (apply)
341 apply_bytes -= ret;
342 sg->offset += ret;
343 sg->length -= ret;
344 if (uncharge)
345 sk_mem_uncharge(sk, ret);
346
347 if (!sg->length) {
348 put_page(p);
349 md->sg_start++;
350 if (md->sg_start == MAX_SKB_FRAGS)
351 md->sg_start = 0;
Prashant Bhole6ef6d842018-03-30 09:21:00 +0900352 sg_init_table(sg, 1);
John Fastabend4f738ad2018-03-18 12:57:10 -0700353
354 if (md->sg_start == md->sg_end)
355 break;
356 }
357
358 if (apply && !apply_bytes)
359 break;
360 }
361 return 0;
362}
363
364static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
365{
366 struct scatterlist *sg = md->sg_data + md->sg_start;
367
368 if (md->sg_copy[md->sg_start]) {
369 md->data = md->data_end = 0;
370 } else {
371 md->data = sg_virt(sg);
372 md->data_end = md->data + sg->length;
373 }
374}
375
376static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
377{
378 struct scatterlist *sg = md->sg_data;
379 int i = md->sg_start;
380
381 do {
382 int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
383
384 sk_mem_uncharge(sk, uncharge);
385 bytes -= uncharge;
386 if (!bytes)
387 break;
388 i++;
389 if (i == MAX_SKB_FRAGS)
390 i = 0;
391 } while (i != md->sg_end);
392}
393
394static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
395{
396 struct scatterlist *sg = md->sg_data;
397 int i = md->sg_start, free;
398
399 while (bytes && sg[i].length) {
400 free = sg[i].length;
401 if (bytes < free) {
402 sg[i].length -= bytes;
403 sg[i].offset += bytes;
404 sk_mem_uncharge(sk, bytes);
405 break;
406 }
407
408 sk_mem_uncharge(sk, sg[i].length);
409 put_page(sg_page(&sg[i]));
410 bytes -= sg[i].length;
411 sg[i].length = 0;
412 sg[i].page_link = 0;
413 sg[i].offset = 0;
414 i++;
415
416 if (i == MAX_SKB_FRAGS)
417 i = 0;
418 }
419}
420
421static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
422{
423 struct scatterlist *sg = md->sg_data;
424 int i = start, free = 0;
425
426 while (sg[i].length) {
427 free += sg[i].length;
428 sk_mem_uncharge(sk, sg[i].length);
429 put_page(sg_page(&sg[i]));
430 sg[i].length = 0;
431 sg[i].page_link = 0;
432 sg[i].offset = 0;
433 i++;
434
435 if (i == MAX_SKB_FRAGS)
436 i = 0;
437 }
438
439 return free;
440}
441
442static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
443{
444 int free = free_sg(sk, md->sg_start, md);
445
446 md->sg_start = md->sg_end;
447 return free;
448}
449
450static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
451{
452 return free_sg(sk, md->sg_curr, md);
453}
454
455static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
456{
457 return ((_rc == SK_PASS) ?
458 (md->map ? __SK_REDIRECT : __SK_PASS) :
459 __SK_DROP);
460}
461
462static unsigned int smap_do_tx_msg(struct sock *sk,
463 struct smap_psock *psock,
464 struct sk_msg_buff *md)
465{
466 struct bpf_prog *prog;
467 unsigned int rc, _rc;
468
469 preempt_disable();
470 rcu_read_lock();
471
472 /* If the policy was removed mid-send then default to 'accept' */
473 prog = READ_ONCE(psock->bpf_tx_msg);
474 if (unlikely(!prog)) {
475 _rc = SK_PASS;
476 goto verdict;
477 }
478
479 bpf_compute_data_pointers_sg(md);
480 rc = (*prog->bpf_func)(md, prog->insnsi);
481 psock->apply_bytes = md->apply_bytes;
482
483 /* Moving return codes from UAPI namespace into internal namespace */
484 _rc = bpf_map_msg_verdict(rc, md);
485
	/* The psock has a refcount on the sock but not on the map and, because
	 * we need to drop the RCU read lock here, it's possible the map could
	 * be removed between here and when we need it to execute the sock
	 * redirect. So do the map lookup now for future use.
	 */
491 if (_rc == __SK_REDIRECT) {
492 if (psock->sk_redir)
493 sock_put(psock->sk_redir);
494 psock->sk_redir = do_msg_redirect_map(md);
495 if (!psock->sk_redir) {
496 _rc = __SK_DROP;
497 goto verdict;
498 }
499 sock_hold(psock->sk_redir);
500 }
501verdict:
502 rcu_read_unlock();
503 preempt_enable();
504
505 return _rc;
506}
507
John Fastabend8934ce22018-03-28 12:49:15 -0700508static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
509 struct smap_psock *psock,
510 struct sk_msg_buff *md, int flags)
511{
512 bool apply = apply_bytes;
513 size_t size, copied = 0;
514 struct sk_msg_buff *r;
515 int err = 0, i;
516
517 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
518 if (unlikely(!r))
519 return -ENOMEM;
520
521 lock_sock(sk);
522 r->sg_start = md->sg_start;
523 i = md->sg_start;
524
525 do {
526 r->sg_data[i] = md->sg_data[i];
527
528 size = (apply && apply_bytes < md->sg_data[i].length) ?
529 apply_bytes : md->sg_data[i].length;
530
531 if (!sk_wmem_schedule(sk, size)) {
532 if (!copied)
533 err = -ENOMEM;
534 break;
535 }
536
537 sk_mem_charge(sk, size);
538 r->sg_data[i].length = size;
539 md->sg_data[i].length -= size;
540 md->sg_data[i].offset += size;
541 copied += size;
542
543 if (md->sg_data[i].length) {
544 get_page(sg_page(&r->sg_data[i]));
545 r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
546 } else {
547 i++;
548 if (i == MAX_SKB_FRAGS)
549 i = 0;
550 r->sg_end = i;
551 }
552
553 if (apply) {
554 apply_bytes -= size;
555 if (!apply_bytes)
556 break;
557 }
558 } while (i != md->sg_end);
559
560 md->sg_start = i;
561
562 if (!err) {
563 list_add_tail(&r->list, &psock->ingress);
564 sk->sk_data_ready(sk);
565 } else {
566 free_start_sg(sk, r);
567 kfree(r);
568 }
569
570 release_sock(sk);
571 return err;
572}
573
John Fastabend4f738ad2018-03-18 12:57:10 -0700574static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
575 struct sk_msg_buff *md,
576 int flags)
577{
578 struct smap_psock *psock;
579 struct scatterlist *sg;
580 int i, err, free = 0;
John Fastabend8934ce22018-03-28 12:49:15 -0700581 bool ingress = !!(md->flags & BPF_F_INGRESS);
John Fastabend4f738ad2018-03-18 12:57:10 -0700582
583 sg = md->sg_data;
584
585 rcu_read_lock();
586 psock = smap_psock_sk(sk);
587 if (unlikely(!psock))
588 goto out_rcu;
589
590 if (!refcount_inc_not_zero(&psock->refcnt))
591 goto out_rcu;
592
593 rcu_read_unlock();
John Fastabend8934ce22018-03-28 12:49:15 -0700594
595 if (ingress) {
596 err = bpf_tcp_ingress(sk, send, psock, md, flags);
597 } else {
598 lock_sock(sk);
599 err = bpf_tcp_push(sk, send, md, flags, false);
600 release_sock(sk);
601 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700602 smap_release_sock(psock, sk);
603 if (unlikely(err))
604 goto out;
605 return 0;
606out_rcu:
607 rcu_read_unlock();
608out:
609 i = md->sg_start;
610 while (sg[i].length) {
611 free += sg[i].length;
612 put_page(sg_page(&sg[i]));
613 sg[i].length = 0;
614 i++;
615 if (i == MAX_SKB_FRAGS)
616 i = 0;
617 }
618 return free;
619}
620
621static inline void bpf_md_init(struct smap_psock *psock)
622{
623 if (!psock->apply_bytes) {
624 psock->eval = __SK_NONE;
625 if (psock->sk_redir) {
626 sock_put(psock->sk_redir);
627 psock->sk_redir = NULL;
628 }
629 }
630}
631
632static void apply_bytes_dec(struct smap_psock *psock, int i)
633{
634 if (psock->apply_bytes) {
635 if (psock->apply_bytes < i)
636 psock->apply_bytes = 0;
637 else
638 psock->apply_bytes -= i;
639 }
640}
641
642static int bpf_exec_tx_verdict(struct smap_psock *psock,
643 struct sk_msg_buff *m,
644 struct sock *sk,
645 int *copied, int flags)
646{
647 bool cork = false, enospc = (m->sg_start == m->sg_end);
648 struct sock *redir;
649 int err = 0;
650 int send;
651
652more_data:
653 if (psock->eval == __SK_NONE)
654 psock->eval = smap_do_tx_msg(sk, psock, m);
655
656 if (m->cork_bytes &&
657 m->cork_bytes > psock->sg_size && !enospc) {
658 psock->cork_bytes = m->cork_bytes - psock->sg_size;
659 if (!psock->cork) {
660 psock->cork = kcalloc(1,
661 sizeof(struct sk_msg_buff),
662 GFP_ATOMIC | __GFP_NOWARN);
663
664 if (!psock->cork) {
665 err = -ENOMEM;
666 goto out_err;
667 }
668 }
669 memcpy(psock->cork, m, sizeof(*m));
670 goto out_err;
671 }
672
673 send = psock->sg_size;
674 if (psock->apply_bytes && psock->apply_bytes < send)
675 send = psock->apply_bytes;
676
677 switch (psock->eval) {
678 case __SK_PASS:
679 err = bpf_tcp_push(sk, send, m, flags, true);
680 if (unlikely(err)) {
681 *copied -= free_start_sg(sk, m);
682 break;
683 }
684
685 apply_bytes_dec(psock, send);
686 psock->sg_size -= send;
687 break;
688 case __SK_REDIRECT:
689 redir = psock->sk_redir;
690 apply_bytes_dec(psock, send);
691
692 if (psock->cork) {
693 cork = true;
694 psock->cork = NULL;
695 }
696
697 return_mem_sg(sk, send, m);
698 release_sock(sk);
699
700 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
701 lock_sock(sk);
702
703 if (cork) {
704 free_start_sg(sk, m);
705 kfree(m);
706 m = NULL;
707 }
708 if (unlikely(err))
709 *copied -= err;
710 else
711 psock->sg_size -= send;
712 break;
713 case __SK_DROP:
714 default:
715 free_bytes_sg(sk, send, m);
716 apply_bytes_dec(psock, send);
717 *copied -= send;
718 psock->sg_size -= send;
719 err = -EACCES;
720 break;
721 }
722
723 if (likely(!err)) {
724 bpf_md_init(psock);
725 if (m &&
726 m->sg_data[m->sg_start].page_link &&
727 m->sg_data[m->sg_start].length)
728 goto more_data;
729 }
730
731out_err:
732 return err;
733}
734
John Fastabend8934ce22018-03-28 12:49:15 -0700735static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
736 int nonblock, int flags, int *addr_len)
737{
738 struct iov_iter *iter = &msg->msg_iter;
739 struct smap_psock *psock;
740 int copied = 0;
741
742 if (unlikely(flags & MSG_ERRQUEUE))
743 return inet_recv_error(sk, msg, len, addr_len);
744
745 rcu_read_lock();
746 psock = smap_psock_sk(sk);
747 if (unlikely(!psock))
748 goto out;
749
750 if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
751 goto out;
752 rcu_read_unlock();
753
754 if (!skb_queue_empty(&sk->sk_receive_queue))
755 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
756
757 lock_sock(sk);
758 while (copied != len) {
759 struct scatterlist *sg;
760 struct sk_msg_buff *md;
761 int i;
762
763 md = list_first_entry_or_null(&psock->ingress,
764 struct sk_msg_buff, list);
765 if (unlikely(!md))
766 break;
767 i = md->sg_start;
768 do {
769 struct page *page;
770 int n, copy;
771
772 sg = &md->sg_data[i];
773 copy = sg->length;
774 page = sg_page(sg);
775
776 if (copied + copy > len)
777 copy = len - copied;
778
779 n = copy_page_to_iter(page, sg->offset, copy, iter);
780 if (n != copy) {
781 md->sg_start = i;
782 release_sock(sk);
783 smap_release_sock(psock, sk);
784 return -EFAULT;
785 }
786
787 copied += copy;
788 sg->offset += copy;
789 sg->length -= copy;
790 sk_mem_uncharge(sk, copy);
791
792 if (!sg->length) {
793 i++;
794 if (i == MAX_SKB_FRAGS)
795 i = 0;
John Fastabendfa246692018-03-28 12:49:25 -0700796 if (!md->skb)
797 put_page(page);
John Fastabend8934ce22018-03-28 12:49:15 -0700798 }
799 if (copied == len)
800 break;
801 } while (i != md->sg_end);
802 md->sg_start = i;
803
804 if (!sg->length && md->sg_start == md->sg_end) {
805 list_del(&md->list);
John Fastabendfa246692018-03-28 12:49:25 -0700806 if (md->skb)
807 consume_skb(md->skb);
John Fastabend8934ce22018-03-28 12:49:15 -0700808 kfree(md);
809 }
810 }
811
812 release_sock(sk);
813 smap_release_sock(psock, sk);
814 return copied;
815out:
816 rcu_read_unlock();
817 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
818}
819
820
John Fastabend4f738ad2018-03-18 12:57:10 -0700821static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
822{
823 int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
824 struct sk_msg_buff md = {0};
825 unsigned int sg_copy = 0;
826 struct smap_psock *psock;
827 int copied = 0, err = 0;
828 struct scatterlist *sg;
829 long timeo;
830
	/* It's possible a sock event or user removed the psock _but_ the ops
	 * have not been reprogrammed yet, so we get here. In this case fall
	 * back to tcp_sendmsg. Note this only works because we _only_ ever
	 * allow a single ULP; there is no ULP hierarchy here.
	 */
836 rcu_read_lock();
837 psock = smap_psock_sk(sk);
838 if (unlikely(!psock)) {
839 rcu_read_unlock();
840 return tcp_sendmsg(sk, msg, size);
841 }
842
	/* Increment the psock refcnt to ensure it's not released while sending
	 * a message. Required because sk lookup and bpf programs are used in
	 * separate rcu critical sections. It's OK if we lose the map entry
	 * but we can't lose the sock reference.
	 */
848 if (!refcount_inc_not_zero(&psock->refcnt)) {
849 rcu_read_unlock();
850 return tcp_sendmsg(sk, msg, size);
851 }
852
853 sg = md.sg_data;
Prashant Bhole6ef6d842018-03-30 09:21:00 +0900854 sg_init_marker(sg, MAX_SKB_FRAGS);
John Fastabend4f738ad2018-03-18 12:57:10 -0700855 rcu_read_unlock();
856
857 lock_sock(sk);
858 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
859
860 while (msg_data_left(msg)) {
861 struct sk_msg_buff *m;
862 bool enospc = false;
863 int copy;
864
865 if (sk->sk_err) {
866 err = sk->sk_err;
867 goto out_err;
868 }
869
870 copy = msg_data_left(msg);
871 if (!sk_stream_memory_free(sk))
872 goto wait_for_sndbuf;
873
874 m = psock->cork_bytes ? psock->cork : &md;
875 m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
876 err = sk_alloc_sg(sk, copy, m->sg_data,
877 m->sg_start, &m->sg_end, &sg_copy,
878 m->sg_end - 1);
879 if (err) {
880 if (err != -ENOSPC)
881 goto wait_for_memory;
882 enospc = true;
883 copy = sg_copy;
884 }
885
886 err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
887 if (err < 0) {
888 free_curr_sg(sk, m);
889 goto out_err;
890 }
891
892 psock->sg_size += copy;
893 copied += copy;
894 sg_copy = 0;
895
		/* When bytes are being corked, skip running the BPF program and
		 * applying the verdict unless there is no more buffer space. In
		 * the ENOSPC case simply run the BPF program with the currently
		 * accumulated data. We don't have much choice at this point;
		 * we could try extending the page frags or chaining complex
		 * frags, but even in these cases _eventually_ we will hit an
		 * OOM scenario. More complex recovery schemes may be
		 * implemented in the future, but BPF programs must handle
		 * the case where apply_cork requests are not honored. The
		 * canonical method to verify this is to check the data length,
		 * as sketched below.
		 */
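		/* A sketch (not from this file) of the BPF-side length check
		 * referred to above, written for the sk_msg program type;
		 * EXPECTED_LEN stands in for whatever size the program asked
		 * to cork, and dropping short data is just one possible policy:
		 *
		 *	SEC("sk_msg")
		 *	int msg_verdict(struct sk_msg_md *msg)
		 *	{
		 *		void *data = (void *)(long)msg->data;
		 *		void *data_end = (void *)(long)msg->data_end;
		 *
		 *		if (data + EXPECTED_LEN > data_end)
		 *			return SK_DROP;
		 *		return SK_PASS;
		 *	}
		 */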
907 if (psock->cork_bytes) {
908 if (copy > psock->cork_bytes)
909 psock->cork_bytes = 0;
910 else
911 psock->cork_bytes -= copy;
912
913 if (psock->cork_bytes && !enospc)
914 goto out_cork;
915
916 /* All cork bytes accounted for re-run filter */
917 psock->eval = __SK_NONE;
918 psock->cork_bytes = 0;
919 }
920
921 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
922 if (unlikely(err < 0))
923 goto out_err;
924 continue;
925wait_for_sndbuf:
926 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
927wait_for_memory:
928 err = sk_stream_wait_memory(sk, &timeo);
929 if (err)
930 goto out_err;
931 }
932out_err:
933 if (err < 0)
934 err = sk_stream_error(sk, msg->msg_flags, err);
935out_cork:
936 release_sock(sk);
937 smap_release_sock(psock, sk);
938 return copied ? copied : err;
939}
940
941static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
942 int offset, size_t size, int flags)
943{
944 struct sk_msg_buff md = {0}, *m = NULL;
945 int err = 0, copied = 0;
946 struct smap_psock *psock;
947 struct scatterlist *sg;
948 bool enospc = false;
949
950 rcu_read_lock();
951 psock = smap_psock_sk(sk);
952 if (unlikely(!psock))
953 goto accept;
954
955 if (!refcount_inc_not_zero(&psock->refcnt))
956 goto accept;
957 rcu_read_unlock();
958
959 lock_sock(sk);
960
Prashant Bhole6ef6d842018-03-30 09:21:00 +0900961 if (psock->cork_bytes) {
John Fastabend4f738ad2018-03-18 12:57:10 -0700962 m = psock->cork;
Prashant Bhole6ef6d842018-03-30 09:21:00 +0900963 sg = &m->sg_data[m->sg_end];
964 } else {
John Fastabend4f738ad2018-03-18 12:57:10 -0700965 m = &md;
Prashant Bhole6ef6d842018-03-30 09:21:00 +0900966 sg = m->sg_data;
967 sg_init_marker(sg, MAX_SKB_FRAGS);
968 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700969
970 /* Catch case where ring is full and sendpage is stalled. */
971 if (unlikely(m->sg_end == m->sg_start &&
972 m->sg_data[m->sg_end].length))
973 goto out_err;
974
975 psock->sg_size += size;
John Fastabend4f738ad2018-03-18 12:57:10 -0700976 sg_set_page(sg, page, size, offset);
977 get_page(page);
978 m->sg_copy[m->sg_end] = true;
979 sk_mem_charge(sk, size);
980 m->sg_end++;
981 copied = size;
982
983 if (m->sg_end == MAX_SKB_FRAGS)
984 m->sg_end = 0;
985
986 if (m->sg_end == m->sg_start)
987 enospc = true;
988
989 if (psock->cork_bytes) {
990 if (size > psock->cork_bytes)
991 psock->cork_bytes = 0;
992 else
993 psock->cork_bytes -= size;
994
995 if (psock->cork_bytes && !enospc)
996 goto out_err;
997
998 /* All cork bytes accounted for re-run filter */
999 psock->eval = __SK_NONE;
1000 psock->cork_bytes = 0;
1001 }
1002
1003 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1004out_err:
1005 release_sock(sk);
1006 smap_release_sock(psock, sk);
1007 return copied ? copied : err;
1008accept:
1009 rcu_read_unlock();
1010 return tcp_sendpage(sk, page, offset, size, flags);
1011}
1012
1013static void bpf_tcp_msg_add(struct smap_psock *psock,
1014 struct sock *sk,
1015 struct bpf_prog *tx_msg)
1016{
1017 struct bpf_prog *orig_tx_msg;
1018
1019 orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
1020 if (orig_tx_msg)
1021 bpf_prog_put(orig_tx_msg);
1022}
1023
John Fastabend1aa12bd2018-02-05 10:17:49 -08001024static int bpf_tcp_ulp_register(void)
1025{
1026 tcp_bpf_proto = tcp_prot;
1027 tcp_bpf_proto.close = bpf_tcp_close;
John Fastabend4f738ad2018-03-18 12:57:10 -07001028 /* Once BPF TX ULP is registered it is never unregistered. It
1029 * will be in the ULP list for the lifetime of the system. Doing
1030 * duplicate registers is not a problem.
1031 */
John Fastabend1aa12bd2018-02-05 10:17:49 -08001032 return tcp_register_ulp(&bpf_tcp_ulp_ops);
1033}
1034
John Fastabend174a79f2017-08-15 22:32:47 -07001035static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1036{
1037 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
1038 int rc;
1039
1040 if (unlikely(!prog))
John Fastabend04686ef2017-10-31 19:17:31 -07001041 return __SK_DROP;
John Fastabend174a79f2017-08-15 22:32:47 -07001042
1043 skb_orphan(skb);
John Fastabend34f795022017-10-18 07:10:36 -07001044 /* We need to ensure that BPF metadata for maps is also cleared
1045 * when we orphan the skb so that we don't have the possibility
1046 * to reference a stale map.
1047 */
1048 TCP_SKB_CB(skb)->bpf.map = NULL;
John Fastabend174a79f2017-08-15 22:32:47 -07001049 skb->sk = psock->sock;
Daniel Borkmann6aaae2b2017-09-25 02:25:50 +02001050 bpf_compute_data_pointers(skb);
John Fastabend34f795022017-10-18 07:10:36 -07001051 preempt_disable();
John Fastabend174a79f2017-08-15 22:32:47 -07001052 rc = (*prog->bpf_func)(skb, prog->insnsi);
John Fastabend34f795022017-10-18 07:10:36 -07001053 preempt_enable();
John Fastabend174a79f2017-08-15 22:32:47 -07001054 skb->sk = NULL;
1055
John Fastabend04686ef2017-10-31 19:17:31 -07001056 /* Moving return codes from UAPI namespace into internal namespace */
John Fastabendbfa640752017-10-27 09:45:53 -07001057 return rc == SK_PASS ?
John Fastabend04686ef2017-10-31 19:17:31 -07001058 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
1059 __SK_DROP;
John Fastabend174a79f2017-08-15 22:32:47 -07001060}
1061
John Fastabendfa246692018-03-28 12:49:25 -07001062static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
1063{
1064 struct sock *sk = psock->sock;
1065 int copied = 0, num_sg;
1066 struct sk_msg_buff *r;
1067
1068 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
1069 if (unlikely(!r))
1070 return -EAGAIN;
1071
1072 if (!sk_rmem_schedule(sk, skb, skb->len)) {
1073 kfree(r);
1074 return -EAGAIN;
1075 }
1076
1077 sg_init_table(r->sg_data, MAX_SKB_FRAGS);
1078 num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
1079 if (unlikely(num_sg < 0)) {
1080 kfree(r);
1081 return num_sg;
1082 }
1083 sk_mem_charge(sk, skb->len);
1084 copied = skb->len;
1085 r->sg_start = 0;
1086 r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
1087 r->skb = skb;
1088 list_add_tail(&r->list, &psock->ingress);
1089 sk->sk_data_ready(sk);
1090 return copied;
1091}
1092
John Fastabend174a79f2017-08-15 22:32:47 -07001093static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
1094{
John Fastabendfa246692018-03-28 12:49:25 -07001095 struct smap_psock *peer;
John Fastabend90a96312017-09-01 11:29:26 -07001096 struct sock *sk;
John Fastabendfa246692018-03-28 12:49:25 -07001097 __u32 in;
John Fastabend174a79f2017-08-15 22:32:47 -07001098 int rc;
1099
John Fastabend174a79f2017-08-15 22:32:47 -07001100 rc = smap_verdict_func(psock, skb);
1101 switch (rc) {
John Fastabend04686ef2017-10-31 19:17:31 -07001102 case __SK_REDIRECT:
John Fastabend34f795022017-10-18 07:10:36 -07001103 sk = do_sk_redirect_map(skb);
John Fastabendfa246692018-03-28 12:49:25 -07001104 if (!sk) {
1105 kfree_skb(skb);
1106 break;
1107 }
John Fastabend174a79f2017-08-15 22:32:47 -07001108
John Fastabendfa246692018-03-28 12:49:25 -07001109 peer = smap_psock_sk(sk);
1110 in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1111
1112 if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
1113 !test_bit(SMAP_TX_RUNNING, &peer->state))) {
1114 kfree_skb(skb);
1115 break;
1116 }
1117
1118 if (!in && sock_writeable(sk)) {
1119 skb_set_owner_w(skb, sk);
1120 skb_queue_tail(&peer->rxqueue, skb);
1121 schedule_work(&peer->tx_work);
1122 break;
1123 } else if (in &&
1124 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
1125 skb_queue_tail(&peer->rxqueue, skb);
1126 schedule_work(&peer->tx_work);
1127 break;
John Fastabend174a79f2017-08-15 22:32:47 -07001128 }
1129 /* Fall through and free skb otherwise */
John Fastabend04686ef2017-10-31 19:17:31 -07001130 case __SK_DROP:
John Fastabend174a79f2017-08-15 22:32:47 -07001131 default:
John Fastabend174a79f2017-08-15 22:32:47 -07001132 kfree_skb(skb);
1133 }
1134}
1135
1136static void smap_report_sk_error(struct smap_psock *psock, int err)
1137{
1138 struct sock *sk = psock->sock;
1139
1140 sk->sk_err = err;
1141 sk->sk_error_report(sk);
1142}
1143
John Fastabend174a79f2017-08-15 22:32:47 -07001144static void smap_read_sock_strparser(struct strparser *strp,
1145 struct sk_buff *skb)
1146{
1147 struct smap_psock *psock;
1148
1149 rcu_read_lock();
1150 psock = container_of(strp, struct smap_psock, strp);
1151 smap_do_verdict(psock, skb);
1152 rcu_read_unlock();
1153}
1154
1155/* Called with lock held on socket */
1156static void smap_data_ready(struct sock *sk)
1157{
1158 struct smap_psock *psock;
1159
John Fastabendd26e597d2017-08-28 07:10:45 -07001160 rcu_read_lock();
John Fastabend174a79f2017-08-15 22:32:47 -07001161 psock = smap_psock_sk(sk);
John Fastabendd26e597d2017-08-28 07:10:45 -07001162 if (likely(psock)) {
1163 write_lock_bh(&sk->sk_callback_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001164 strp_data_ready(&psock->strp);
John Fastabendd26e597d2017-08-28 07:10:45 -07001165 write_unlock_bh(&sk->sk_callback_lock);
1166 }
1167 rcu_read_unlock();
John Fastabend174a79f2017-08-15 22:32:47 -07001168}
1169
1170static void smap_tx_work(struct work_struct *w)
1171{
1172 struct smap_psock *psock;
1173 struct sk_buff *skb;
1174 int rem, off, n;
1175
1176 psock = container_of(w, struct smap_psock, tx_work);
1177
1178 /* lock sock to avoid losing sk_socket at some point during loop */
1179 lock_sock(psock->sock);
1180 if (psock->save_skb) {
1181 skb = psock->save_skb;
1182 rem = psock->save_rem;
1183 off = psock->save_off;
1184 psock->save_skb = NULL;
1185 goto start;
1186 }
1187
1188 while ((skb = skb_dequeue(&psock->rxqueue))) {
John Fastabendfa246692018-03-28 12:49:25 -07001189 __u32 flags;
1190
John Fastabend174a79f2017-08-15 22:32:47 -07001191 rem = skb->len;
1192 off = 0;
1193start:
John Fastabendfa246692018-03-28 12:49:25 -07001194 flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
John Fastabend174a79f2017-08-15 22:32:47 -07001195 do {
John Fastabendfa246692018-03-28 12:49:25 -07001196 if (likely(psock->sock->sk_socket)) {
1197 if (flags)
1198 n = smap_do_ingress(psock, skb);
1199 else
1200 n = skb_send_sock_locked(psock->sock,
1201 skb, off, rem);
1202 } else {
John Fastabend174a79f2017-08-15 22:32:47 -07001203 n = -EINVAL;
John Fastabendfa246692018-03-28 12:49:25 -07001204 }
1205
John Fastabend174a79f2017-08-15 22:32:47 -07001206 if (n <= 0) {
1207 if (n == -EAGAIN) {
1208 /* Retry when space is available */
1209 psock->save_skb = skb;
1210 psock->save_rem = rem;
1211 psock->save_off = off;
1212 goto out;
1213 }
1214 /* Hard errors break pipe and stop xmit */
1215 smap_report_sk_error(psock, n ? -n : EPIPE);
1216 clear_bit(SMAP_TX_RUNNING, &psock->state);
John Fastabend174a79f2017-08-15 22:32:47 -07001217 kfree_skb(skb);
1218 goto out;
1219 }
1220 rem -= n;
1221 off += n;
1222 } while (rem);
John Fastabendfa246692018-03-28 12:49:25 -07001223
1224 if (!flags)
1225 kfree_skb(skb);
John Fastabend174a79f2017-08-15 22:32:47 -07001226 }
1227out:
1228 release_sock(psock->sock);
1229}
1230
1231static void smap_write_space(struct sock *sk)
1232{
1233 struct smap_psock *psock;
1234
1235 rcu_read_lock();
1236 psock = smap_psock_sk(sk);
1237 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1238 schedule_work(&psock->tx_work);
1239 rcu_read_unlock();
1240}
1241
1242static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
1243{
John Fastabend174a79f2017-08-15 22:32:47 -07001244 if (!psock->strp_enabled)
John Fastabend2f857d02017-08-28 07:10:25 -07001245 return;
John Fastabend174a79f2017-08-15 22:32:47 -07001246 sk->sk_data_ready = psock->save_data_ready;
1247 sk->sk_write_space = psock->save_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001248 psock->save_data_ready = NULL;
1249 psock->save_write_space = NULL;
John Fastabend174a79f2017-08-15 22:32:47 -07001250 strp_stop(&psock->strp);
1251 psock->strp_enabled = false;
John Fastabend174a79f2017-08-15 22:32:47 -07001252}
1253
1254static void smap_destroy_psock(struct rcu_head *rcu)
1255{
1256 struct smap_psock *psock = container_of(rcu,
1257 struct smap_psock, rcu);
1258
1259 /* Now that a grace period has passed there is no longer
1260 * any reference to this sock in the sockmap so we can
1261 * destroy the psock, strparser, and bpf programs. But,
1262 * because we use workqueue sync operations we can not
1263 * do it in rcu context
1264 */
1265 schedule_work(&psock->gc_work);
1266}
1267
John Fastabend2f857d02017-08-28 07:10:25 -07001268static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
John Fastabend174a79f2017-08-15 22:32:47 -07001269{
John Fastabendffa35662018-03-18 12:56:54 -07001270 if (refcount_dec_and_test(&psock->refcnt)) {
1271 tcp_cleanup_ulp(sock);
1272 smap_stop_sock(psock, sock);
1273 clear_bit(SMAP_TX_RUNNING, &psock->state);
1274 rcu_assign_sk_user_data(sock, NULL);
1275 call_rcu_sched(&psock->rcu, smap_destroy_psock);
1276 }
John Fastabend174a79f2017-08-15 22:32:47 -07001277}
1278
1279static int smap_parse_func_strparser(struct strparser *strp,
1280 struct sk_buff *skb)
1281{
1282 struct smap_psock *psock;
1283 struct bpf_prog *prog;
1284 int rc;
1285
1286 rcu_read_lock();
1287 psock = container_of(strp, struct smap_psock, strp);
1288 prog = READ_ONCE(psock->bpf_parse);
1289
1290 if (unlikely(!prog)) {
1291 rcu_read_unlock();
1292 return skb->len;
1293 }
1294
	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later, and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
1302 skb->sk = psock->sock;
Daniel Borkmann6aaae2b2017-09-25 02:25:50 +02001303 bpf_compute_data_pointers(skb);
John Fastabend174a79f2017-08-15 22:32:47 -07001304 rc = (*prog->bpf_func)(skb, prog->insnsi);
1305 skb->sk = NULL;
1306 rcu_read_unlock();
1307 return rc;
1308}
1309
John Fastabend174a79f2017-08-15 22:32:47 -07001310static int smap_read_sock_done(struct strparser *strp, int err)
1311{
1312 return err;
1313}
1314
1315static int smap_init_sock(struct smap_psock *psock,
1316 struct sock *sk)
1317{
Eric Biggers3fd87122017-08-24 14:38:51 -07001318 static const struct strp_callbacks cb = {
1319 .rcv_msg = smap_read_sock_strparser,
1320 .parse_msg = smap_parse_func_strparser,
1321 .read_sock_done = smap_read_sock_done,
1322 };
John Fastabend174a79f2017-08-15 22:32:47 -07001323
John Fastabend174a79f2017-08-15 22:32:47 -07001324 return strp_init(&psock->strp, sk, &cb);
1325}
1326
1327static void smap_init_progs(struct smap_psock *psock,
1328 struct bpf_stab *stab,
1329 struct bpf_prog *verdict,
1330 struct bpf_prog *parse)
1331{
1332 struct bpf_prog *orig_parse, *orig_verdict;
1333
1334 orig_parse = xchg(&psock->bpf_parse, parse);
1335 orig_verdict = xchg(&psock->bpf_verdict, verdict);
1336
1337 if (orig_verdict)
1338 bpf_prog_put(orig_verdict);
1339 if (orig_parse)
1340 bpf_prog_put(orig_parse);
1341}
1342
1343static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
1344{
1345 if (sk->sk_data_ready == smap_data_ready)
1346 return;
1347 psock->save_data_ready = sk->sk_data_ready;
1348 psock->save_write_space = sk->sk_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001349 sk->sk_data_ready = smap_data_ready;
1350 sk->sk_write_space = smap_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001351 psock->strp_enabled = true;
1352}
1353
1354static void sock_map_remove_complete(struct bpf_stab *stab)
1355{
1356 bpf_map_area_free(stab->sock_map);
1357 kfree(stab);
1358}
1359
1360static void smap_gc_work(struct work_struct *w)
1361{
John Fastabend2f857d02017-08-28 07:10:25 -07001362 struct smap_psock_map_entry *e, *tmp;
John Fastabend8934ce22018-03-28 12:49:15 -07001363 struct sk_msg_buff *md, *mtmp;
John Fastabend174a79f2017-08-15 22:32:47 -07001364 struct smap_psock *psock;
1365
1366 psock = container_of(w, struct smap_psock, gc_work);
1367
1368 /* no callback lock needed because we already detached sockmap ops */
1369 if (psock->strp_enabled)
1370 strp_done(&psock->strp);
1371
1372 cancel_work_sync(&psock->tx_work);
1373 __skb_queue_purge(&psock->rxqueue);
1374
1375 /* At this point all strparser and xmit work must be complete */
1376 if (psock->bpf_parse)
1377 bpf_prog_put(psock->bpf_parse);
1378 if (psock->bpf_verdict)
1379 bpf_prog_put(psock->bpf_verdict);
John Fastabend4f738ad2018-03-18 12:57:10 -07001380 if (psock->bpf_tx_msg)
1381 bpf_prog_put(psock->bpf_tx_msg);
1382
1383 if (psock->cork) {
1384 free_start_sg(psock->sock, psock->cork);
1385 kfree(psock->cork);
1386 }
John Fastabend174a79f2017-08-15 22:32:47 -07001387
John Fastabend8934ce22018-03-28 12:49:15 -07001388 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1389 list_del(&md->list);
1390 free_start_sg(psock->sock, md);
1391 kfree(md);
1392 }
1393
John Fastabend2f857d02017-08-28 07:10:25 -07001394 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1395 list_del(&e->list);
1396 kfree(e);
1397 }
John Fastabend174a79f2017-08-15 22:32:47 -07001398
John Fastabend4f738ad2018-03-18 12:57:10 -07001399 if (psock->sk_redir)
1400 sock_put(psock->sk_redir);
1401
John Fastabend174a79f2017-08-15 22:32:47 -07001402 sock_put(psock->sock);
1403 kfree(psock);
1404}
1405
1406static struct smap_psock *smap_init_psock(struct sock *sock,
1407 struct bpf_stab *stab)
1408{
1409 struct smap_psock *psock;
1410
Martin KaFai Lau96eabe72017-08-18 11:28:00 -07001411 psock = kzalloc_node(sizeof(struct smap_psock),
1412 GFP_ATOMIC | __GFP_NOWARN,
1413 stab->map.numa_node);
John Fastabend174a79f2017-08-15 22:32:47 -07001414 if (!psock)
1415 return ERR_PTR(-ENOMEM);
1416
John Fastabend4f738ad2018-03-18 12:57:10 -07001417 psock->eval = __SK_NONE;
John Fastabend174a79f2017-08-15 22:32:47 -07001418 psock->sock = sock;
1419 skb_queue_head_init(&psock->rxqueue);
1420 INIT_WORK(&psock->tx_work, smap_tx_work);
1421 INIT_WORK(&psock->gc_work, smap_gc_work);
John Fastabend2f857d02017-08-28 07:10:25 -07001422 INIT_LIST_HEAD(&psock->maps);
John Fastabend8934ce22018-03-28 12:49:15 -07001423 INIT_LIST_HEAD(&psock->ingress);
John Fastabendffa35662018-03-18 12:56:54 -07001424 refcount_set(&psock->refcnt, 1);
John Fastabend174a79f2017-08-15 22:32:47 -07001425
1426 rcu_assign_sk_user_data(sock, psock);
1427 sock_hold(sock);
1428 return psock;
1429}
1430
1431static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1432{
1433 struct bpf_stab *stab;
John Fastabend174a79f2017-08-15 22:32:47 -07001434 u64 cost;
Eric Dumazet952fad82018-02-13 15:33:52 -08001435 int err;
John Fastabend174a79f2017-08-15 22:32:47 -07001436
John Fastabendfb50df82017-10-18 07:11:22 -07001437 if (!capable(CAP_NET_ADMIN))
1438 return ERR_PTR(-EPERM);
1439
John Fastabend174a79f2017-08-15 22:32:47 -07001440 /* check sanity of attributes */
1441 if (attr->max_entries == 0 || attr->key_size != 4 ||
Chenbo Feng6e71b042017-10-18 13:00:22 -07001442 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
John Fastabend174a79f2017-08-15 22:32:47 -07001443 return ERR_PTR(-EINVAL);
1444
John Fastabend1aa12bd2018-02-05 10:17:49 -08001445 err = bpf_tcp_ulp_register();
1446 if (err && err != -EEXIST)
1447 return ERR_PTR(err);
1448
John Fastabend174a79f2017-08-15 22:32:47 -07001449 stab = kzalloc(sizeof(*stab), GFP_USER);
1450 if (!stab)
1451 return ERR_PTR(-ENOMEM);
1452
Jakub Kicinskibd475642018-01-11 20:29:06 -08001453 bpf_map_init_from_attr(&stab->map, attr);
John Fastabend174a79f2017-08-15 22:32:47 -07001454
1455 /* make sure page count doesn't overflow */
1456 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
Eric Dumazet952fad82018-02-13 15:33:52 -08001457 err = -EINVAL;
John Fastabend174a79f2017-08-15 22:32:47 -07001458 if (cost >= U32_MAX - PAGE_SIZE)
1459 goto free_stab;
1460
1461 stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1462
1463 /* if map size is larger than memlock limit, reject it early */
1464 err = bpf_map_precharge_memlock(stab->map.pages);
1465 if (err)
1466 goto free_stab;
1467
Dan Carpenterf740c342017-08-25 23:27:14 +03001468 err = -ENOMEM;
John Fastabend174a79f2017-08-15 22:32:47 -07001469 stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
Martin KaFai Lau96eabe72017-08-18 11:28:00 -07001470 sizeof(struct sock *),
1471 stab->map.numa_node);
John Fastabend174a79f2017-08-15 22:32:47 -07001472 if (!stab->sock_map)
1473 goto free_stab;
1474
John Fastabend174a79f2017-08-15 22:32:47 -07001475 return &stab->map;
1476free_stab:
1477 kfree(stab);
1478 return ERR_PTR(err);
1479}
1480
John Fastabend2f857d02017-08-28 07:10:25 -07001481static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
1482{
1483 struct smap_psock_map_entry *e, *tmp;
1484
1485 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1486 if (e->entry == entry) {
1487 list_del(&e->list);
1488 break;
1489 }
1490 }
1491}
1492
John Fastabend174a79f2017-08-15 22:32:47 -07001493static void sock_map_free(struct bpf_map *map)
1494{
1495 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1496 int i;
1497
1498 synchronize_rcu();
1499
	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates
	 * and data-ready callbacks that reference the psock from sk_user_data.
	 * Also, psock worker threads are still in flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires, to ensure the psock is really safe to
	 * remove.
	 */
1507 rcu_read_lock();
1508 for (i = 0; i < stab->map.max_entries; i++) {
John Fastabend2f857d02017-08-28 07:10:25 -07001509 struct smap_psock *psock;
John Fastabend174a79f2017-08-15 22:32:47 -07001510 struct sock *sock;
1511
1512 sock = xchg(&stab->sock_map[i], NULL);
1513 if (!sock)
1514 continue;
1515
John Fastabend2f857d02017-08-28 07:10:25 -07001516 write_lock_bh(&sock->sk_callback_lock);
1517 psock = smap_psock_sk(sock);
John Fastabend5731a872018-01-04 20:02:09 -08001518 /* This check handles a racing sock event that can get the
1519 * sk_callback_lock before this case but after xchg happens
1520 * causing the refcnt to hit zero and sock user data (psock)
1521 * to be null and queued for garbage collection.
1522 */
1523 if (likely(psock)) {
1524 smap_list_remove(psock, &stab->sock_map[i]);
1525 smap_release_sock(psock, sock);
1526 }
John Fastabend2f857d02017-08-28 07:10:25 -07001527 write_unlock_bh(&sock->sk_callback_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001528 }
1529 rcu_read_unlock();
1530
John Fastabend2f857d02017-08-28 07:10:25 -07001531 sock_map_remove_complete(stab);
John Fastabend174a79f2017-08-15 22:32:47 -07001532}
1533
1534static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
1535{
1536 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1537 u32 i = key ? *(u32 *)key : U32_MAX;
1538 u32 *next = (u32 *)next_key;
1539
1540 if (i >= stab->map.max_entries) {
1541 *next = 0;
1542 return 0;
1543 }
1544
1545 if (i == stab->map.max_entries - 1)
1546 return -ENOENT;
1547
1548 *next = i + 1;
1549 return 0;
1550}
1551
1552struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
1553{
1554 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1555
1556 if (key >= map->max_entries)
1557 return NULL;
1558
1559 return READ_ONCE(stab->sock_map[key]);
1560}
1561
1562static int sock_map_delete_elem(struct bpf_map *map, void *key)
1563{
1564 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
John Fastabend2f857d02017-08-28 07:10:25 -07001565 struct smap_psock *psock;
John Fastabend174a79f2017-08-15 22:32:47 -07001566 int k = *(u32 *)key;
1567 struct sock *sock;
1568
1569 if (k >= map->max_entries)
1570 return -EINVAL;
1571
1572 sock = xchg(&stab->sock_map[k], NULL);
1573 if (!sock)
1574 return -EINVAL;
1575
John Fastabend2f857d02017-08-28 07:10:25 -07001576 write_lock_bh(&sock->sk_callback_lock);
1577 psock = smap_psock_sk(sock);
1578 if (!psock)
1579 goto out;
1580
1581 if (psock->bpf_parse)
1582 smap_stop_sock(psock, sock);
1583 smap_list_remove(psock, &stab->sock_map[k]);
1584 smap_release_sock(psock, sock);
1585out:
1586 write_unlock_bh(&sock->sk_callback_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001587 return 0;
1588}
1589
/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu, and only after any worker threads are
 * cancelled and synced, so we are certain all references from the
 * update/lookup/delete operations as well as references in the data path are
 * no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This allows us to avoid two updates modifying the
 * user data in sock at the same time; the lock is required anyway for
 * modifying callbacks, we simply increase its scope slightly.
 *
 * Rules to follow:
 * - a psock must always be read inside an RCU critical section
 * - sk_user_data must only be modified inside sk_callback_lock and read
 *   inside an RCU critical section
 * - the psock->maps list must only be read & modified inside sk_callback_lock
 * - sock_map must use READ_ONCE and (cmp)xchg operations
 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
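/* A minimal sketch (illustrative only) of the read-side pattern the rules
 * above imply; it mirrors what the send/recv hooks in this file already do.
 * fallback() is a placeholder for the non-psock path (e.g. tcp_sendmsg):
 *
 *	rcu_read_lock();
 *	psock = smap_psock_sk(sk);
 *	if (unlikely(!psock || !refcount_inc_not_zero(&psock->refcnt))) {
 *		rcu_read_unlock();
 *		return fallback(sk);
 *	}
 *	rcu_read_unlock();
 *	... use psock; the reference keeps it alive ...
 *	smap_release_sock(psock, sk);
 */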
1618static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1619 struct bpf_map *map,
John Fastabend2f857d02017-08-28 07:10:25 -07001620 void *key, u64 flags)
John Fastabend174a79f2017-08-15 22:32:47 -07001621{
1622 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
John Fastabend2f857d02017-08-28 07:10:25 -07001623 struct smap_psock_map_entry *e = NULL;
John Fastabend4f738ad2018-03-18 12:57:10 -07001624 struct bpf_prog *verdict, *parse, *tx_msg;
John Fastabend2f857d02017-08-28 07:10:25 -07001625 struct sock *osock, *sock;
1626 struct smap_psock *psock;
John Fastabend174a79f2017-08-15 22:32:47 -07001627 u32 i = *(u32 *)key;
John Fastabend4f738ad2018-03-18 12:57:10 -07001628 bool new = false;
John Fastabend2f857d02017-08-28 07:10:25 -07001629 int err;
John Fastabend174a79f2017-08-15 22:32:47 -07001630
1631 if (unlikely(flags > BPF_EXIST))
1632 return -EINVAL;
1633
1634 if (unlikely(i >= stab->map.max_entries))
1635 return -E2BIG;
1636
John Fastabend174a79f2017-08-15 22:32:47 -07001637 sock = READ_ONCE(stab->sock_map[i]);
John Fastabend2f857d02017-08-28 07:10:25 -07001638 if (flags == BPF_EXIST && !sock)
1639 return -ENOENT;
1640 else if (flags == BPF_NOEXIST && sock)
John Fastabend174a79f2017-08-15 22:32:47 -07001641 return -EEXIST;
John Fastabend174a79f2017-08-15 22:32:47 -07001642
John Fastabend2f857d02017-08-28 07:10:25 -07001643 sock = skops->sk;
John Fastabend174a79f2017-08-15 22:32:47 -07001644
	/* 1. If the sock map has BPF programs, those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs,
	 * this results in an error.
	 */
1649 verdict = READ_ONCE(stab->bpf_verdict);
1650 parse = READ_ONCE(stab->bpf_parse);
John Fastabend4f738ad2018-03-18 12:57:10 -07001651 tx_msg = READ_ONCE(stab->bpf_tx_msg);
John Fastabend174a79f2017-08-15 22:32:47 -07001652
John Fastabend2f857d02017-08-28 07:10:25 -07001653 if (parse && verdict) {
John Fastabend174a79f2017-08-15 22:32:47 -07001654 /* bpf prog refcnt may be zero if a concurrent attach operation
1655 * removes the program after the above READ_ONCE() but before
1656 * we increment the refcnt. If this is the case abort with an
1657 * error.
1658 */
1659 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
1660 if (IS_ERR(verdict))
1661 return PTR_ERR(verdict);
1662
1663 parse = bpf_prog_inc_not_zero(stab->bpf_parse);
1664 if (IS_ERR(parse)) {
1665 bpf_prog_put(verdict);
1666 return PTR_ERR(parse);
1667 }
1668 }
1669
John Fastabend4f738ad2018-03-18 12:57:10 -07001670 if (tx_msg) {
1671 tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg);
1672 if (IS_ERR(tx_msg)) {
1673 if (verdict)
1674 bpf_prog_put(verdict);
1675 if (parse)
1676 bpf_prog_put(parse);
1677 return PTR_ERR(tx_msg);
1678 }
1679 }
1680
John Fastabend2f857d02017-08-28 07:10:25 -07001681 write_lock_bh(&sock->sk_callback_lock);
1682 psock = smap_psock_sk(sock);
1683
1684 /* 2. Do not allow inheriting programs if psock exists and has
1685 * already inherited programs. This would create confusion on
1686 * which parser/verdict program is running. If no psock exists
1687 * create one. Inside sk_callback_lock to ensure concurrent create
1688 * doesn't update user data.
1689 */
1690 if (psock) {
1691 if (READ_ONCE(psock->bpf_parse) && parse) {
1692 err = -EBUSY;
1693 goto out_progs;
1694 }
John Fastabend4f738ad2018-03-18 12:57:10 -07001695 if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
1696 err = -EBUSY;
1697 goto out_progs;
1698 }
1699 if (!refcount_inc_not_zero(&psock->refcnt)) {
1700 err = -EAGAIN;
1701 goto out_progs;
1702 }
John Fastabend2f857d02017-08-28 07:10:25 -07001703 } else {
John Fastabend174a79f2017-08-15 22:32:47 -07001704 psock = smap_init_psock(sock, stab);
1705 if (IS_ERR(psock)) {
John Fastabend2f857d02017-08-28 07:10:25 -07001706 err = PTR_ERR(psock);
1707 goto out_progs;
John Fastabend174a79f2017-08-15 22:32:47 -07001708 }
John Fastabend2f857d02017-08-28 07:10:25 -07001709
John Fastabend174a79f2017-08-15 22:32:47 -07001710 set_bit(SMAP_TX_RUNNING, &psock->state);
John Fastabend4f738ad2018-03-18 12:57:10 -07001711 new = true;
John Fastabend174a79f2017-08-15 22:32:47 -07001712 }
1713
John Fastabend2f857d02017-08-28 07:10:25 -07001714 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1715 if (!e) {
1716 err = -ENOMEM;
1717 goto out_progs;
1718 }
1719 e->entry = &stab->sock_map[i];
1720
1721 /* 3. At this point we have a reference to a valid psock that is
1722 * running. Attach any BPF programs needed.
1723 */
John Fastabend4f738ad2018-03-18 12:57:10 -07001724 if (tx_msg)
1725 bpf_tcp_msg_add(psock, sock, tx_msg);
1726 if (new) {
1727 err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
1728 if (err)
1729 goto out_free;
1730 }
1731
John Fastabend2f857d02017-08-28 07:10:25 -07001732 if (parse && verdict && !psock->strp_enabled) {
John Fastabend174a79f2017-08-15 22:32:47 -07001733 err = smap_init_sock(psock, sock);
1734 if (err)
John Fastabend2f857d02017-08-28 07:10:25 -07001735 goto out_free;
John Fastabend174a79f2017-08-15 22:32:47 -07001736 smap_init_progs(psock, stab, verdict, parse);
1737 smap_start_sock(psock, sock);
John Fastabend174a79f2017-08-15 22:32:47 -07001738 }
1739
	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock, assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs, if
	 * old_sock has a strp we can stop it.
	 */
1745 list_add_tail(&e->list, &psock->maps);
John Fastabend174a79f2017-08-15 22:32:47 -07001746 write_unlock_bh(&sock->sk_callback_lock);
John Fastabend2f857d02017-08-28 07:10:25 -07001747
1748 osock = xchg(&stab->sock_map[i], sock);
1749 if (osock) {
1750 struct smap_psock *opsock = smap_psock_sk(osock);
1751
1752 write_lock_bh(&osock->sk_callback_lock);
John Fastabend2f857d02017-08-28 07:10:25 -07001753 smap_list_remove(opsock, &stab->sock_map[i]);
1754 smap_release_sock(opsock, osock);
1755 write_unlock_bh(&osock->sk_callback_lock);
1756 }
1757 return 0;
1758out_free:
1759 smap_release_sock(psock, sock);
1760out_progs:
1761 if (verdict)
1762 bpf_prog_put(verdict);
1763 if (parse)
1764 bpf_prog_put(parse);
John Fastabend4f738ad2018-03-18 12:57:10 -07001765 if (tx_msg)
1766 bpf_prog_put(tx_msg);
John Fastabend2f857d02017-08-28 07:10:25 -07001767 write_unlock_bh(&sock->sk_callback_lock);
1768 kfree(e);
John Fastabend174a79f2017-08-15 22:32:47 -07001769 return err;
1770}
1771
John Fastabend5a67da22017-09-08 14:00:49 -07001772int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
John Fastabend174a79f2017-08-15 22:32:47 -07001773{
1774 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
John Fastabend464bc0f2017-08-28 07:10:04 -07001775 struct bpf_prog *orig;
John Fastabend174a79f2017-08-15 22:32:47 -07001776
John Fastabend81374aa2017-08-28 07:11:43 -07001777 if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
1778 return -EINVAL;
1779
John Fastabend464bc0f2017-08-28 07:10:04 -07001780 switch (type) {
John Fastabend4f738ad2018-03-18 12:57:10 -07001781 case BPF_SK_MSG_VERDICT:
1782 orig = xchg(&stab->bpf_tx_msg, prog);
1783 break;
John Fastabend464bc0f2017-08-28 07:10:04 -07001784 case BPF_SK_SKB_STREAM_PARSER:
1785 orig = xchg(&stab->bpf_parse, prog);
1786 break;
1787 case BPF_SK_SKB_STREAM_VERDICT:
1788 orig = xchg(&stab->bpf_verdict, prog);
1789 break;
1790 default:
1791 return -EOPNOTSUPP;
1792 }
John Fastabend174a79f2017-08-15 22:32:47 -07001793
John Fastabend464bc0f2017-08-28 07:10:04 -07001794 if (orig)
1795 bpf_prog_put(orig);
John Fastabend174a79f2017-08-15 22:32:47 -07001796
1797 return 0;
1798}
1799
1800static void *sock_map_lookup(struct bpf_map *map, void *key)
1801{
1802 return NULL;
1803}
1804
1805static int sock_map_update_elem(struct bpf_map *map,
1806 void *key, void *value, u64 flags)
1807{
1808 struct bpf_sock_ops_kern skops;
1809 u32 fd = *(u32 *)value;
1810 struct socket *socket;
1811 int err;
1812
1813 socket = sockfd_lookup(fd, &err);
1814 if (!socket)
1815 return err;
1816
1817 skops.sk = socket->sk;
1818 if (!skops.sk) {
1819 fput(socket->file);
1820 return -EINVAL;
1821 }
1822
John Fastabend435bf0d2017-10-18 07:10:15 -07001823 if (skops.sk->sk_type != SOCK_STREAM ||
1824 skops.sk->sk_protocol != IPPROTO_TCP) {
1825 fput(socket->file);
1826 return -EOPNOTSUPP;
1827 }
1828
John Fastabend2f857d02017-08-28 07:10:25 -07001829 err = sock_map_ctx_update_elem(&skops, map, key, flags);
John Fastabend174a79f2017-08-15 22:32:47 -07001830 fput(socket->file);
1831 return err;
1832}
1833
John Fastabend3d9e9522018-02-05 10:17:54 -08001834static void sock_map_release(struct bpf_map *map, struct file *map_file)
1835{
1836 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1837 struct bpf_prog *orig;
1838
1839 orig = xchg(&stab->bpf_parse, NULL);
1840 if (orig)
1841 bpf_prog_put(orig);
1842 orig = xchg(&stab->bpf_verdict, NULL);
1843 if (orig)
1844 bpf_prog_put(orig);
John Fastabend4f738ad2018-03-18 12:57:10 -07001845
1846 orig = xchg(&stab->bpf_tx_msg, NULL);
1847 if (orig)
1848 bpf_prog_put(orig);
John Fastabend3d9e9522018-02-05 10:17:54 -08001849}
1850
John Fastabend174a79f2017-08-15 22:32:47 -07001851const struct bpf_map_ops sock_map_ops = {
1852 .map_alloc = sock_map_alloc,
1853 .map_free = sock_map_free,
1854 .map_lookup_elem = sock_map_lookup,
1855 .map_get_next_key = sock_map_get_next_key,
1856 .map_update_elem = sock_map_update_elem,
1857 .map_delete_elem = sock_map_delete_elem,
John Fastabend3d9e9522018-02-05 10:17:54 -08001858 .map_release = sock_map_release,
John Fastabend174a79f2017-08-15 22:32:47 -07001859};
1860
John Fastabend2f857d02017-08-28 07:10:25 -07001861BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
1862 struct bpf_map *, map, void *, key, u64, flags)
John Fastabend174a79f2017-08-15 22:32:47 -07001863{
1864 WARN_ON_ONCE(!rcu_read_lock_held());
John Fastabend2f857d02017-08-28 07:10:25 -07001865 return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
John Fastabend174a79f2017-08-15 22:32:47 -07001866}
1867
1868const struct bpf_func_proto bpf_sock_map_update_proto = {
1869 .func = bpf_sock_map_update,
1870 .gpl_only = false,
1871 .pkt_access = true,
1872 .ret_type = RET_INTEGER,
1873 .arg1_type = ARG_PTR_TO_CTX,
1874 .arg2_type = ARG_CONST_MAP_PTR,
1875 .arg3_type = ARG_PTR_TO_MAP_KEY,
1876 .arg4_type = ARG_ANYTHING,
John Fastabend174a79f2017-08-15 22:32:47 -07001877};
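
/* Illustrative sketch (not from this file) of how a sockops program would use
 * the helper above to populate a sockmap as TCP connections are established;
 * sock_map and the key choice are placeholders:
 *
 *	SEC("sockops")
 *	int bpf_sockmap(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *			break;
 *		}
 *		return 0;
 *	}
 */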