/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have two BPF programs attached to it, a program used
 * to parse packets and a program to provide a verdict and redirect
 * decision on the packet. If no BPF parse program is provided it is
 * assumed that every skb is a "message" (skb->len). Otherwise the
 * parse program is attached to strparser and used to build messages
 * that may span multiple skbs. The verdict program will either select
 * a socket to send/receive the skb on or provide the drop code indicating
 * the skb should be dropped. More actions may be added later as needed.
 * The default program will drop packets.
 *
 * For reference, this program is similar to devmap used in the XDP context;
 * reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */
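
/* For illustration only (not compiled here): a minimal sketch of a verdict
 * program that could be attached to a sockmap, redirecting every skb to the
 * socket stored at index 0 (the skb is dropped if that slot is empty). This
 * assumes the BPF-side uapi of this snapshot, i.e. the three-argument
 * bpf_sk_redirect_map(map, key, flags) helper; the map definition and the
 * section name are hypothetical.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 20,
 *	};
 *
 *	SEC("sk_skb_verdict")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(&sock_map, 0, 0);
 *	}
 */
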
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	refcount_t refcnt;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock {
	struct rcu_head rcu;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct bpf_stab *stab;

	/* Back reference used when sock callbacks trigger sockmap operations */
	int key;
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
	void (*save_state_change)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return (struct smap_psock *)rcu_dereference_sk_user_data(sk);
}

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return SK_DROP;

	skb_orphan(skb);
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;

	return rc;
}

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sock;
	int rc;

	/* Because we use per-cpu values to feed input from the sock redirect
	 * BPF program into the do_sk_redirect_map() call, we need to ensure we
	 * are not preempted. An RCU read lock is not sufficient in this case
	 * with CONFIG_PREEMPT_RCU enabled, so we must be explicit here.
	 */
	preempt_disable();
	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case SK_REDIRECT:
		sock = do_sk_redirect_map();
		preempt_enable();
		if (likely(sock)) {
			struct smap_psock *peer = smap_psock_sk(sock);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   sk_stream_memory_free(peer->sock))) {
				peer->sock->sk_wmem_queued += skb->truesize;
				sk_mem_charge(peer->sock, skb->truesize);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		if (rc != SK_REDIRECT)
			preempt_enable();
		kfree_skb(skb);
	}
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_release_sock(struct sock *sock);

/* Called with lock_sock(sk) held */
static void smap_state_change(struct sock *sk)
{
	struct smap_psock *psock;
	struct sock *osk;

	rcu_read_lock();

	/* Allowing transitions into the ESTABLISHED and SYN_RECV states allows
	 * for early binding of sockets to a smap object before the connection
	 * is established.
	 */
	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		break;
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LISTEN:
		break;
	case TCP_CLOSE:
		/* Only release if the map entry is in fact the sock in
		 * question. There is a case where the operator deletes
		 * the sock from the map, but the TCP sock is closed before
		 * the psock is detached. Use cmpxchg to verify the correct
		 * sock is removed.
		 */
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		osk = cmpxchg(&psock->stab->sock_map[psock->key], sk, NULL);
		if (osk == sk)
			smap_release_sock(sk);
		break;
	default:
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		smap_report_sk_error(psock, EPIPE);
		break;
	}
	rcu_read_unlock();
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	write_lock_bh(&sk->sk_callback_lock);
	psock = smap_psock_sk(sk);
	if (likely(psock))
		strp_data_ready(&psock->strp);
	write_unlock_bh(&sk->sk_callback_lock);
}

static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during the loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				sk_mem_uncharge(psock->sock, skb->truesize);
				psock->sock->sk_wmem_queued -= skb->truesize;
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		sk_mem_uncharge(psock->sock, skb->truesize);
		psock->sock->sk_wmem_queued -= skb->truesize;
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	if (!psock->strp_enabled)
		goto out;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	sk->sk_state_change = psock->save_state_change;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	psock->save_state_change = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
out:
	write_unlock_bh(&sk->sk_callback_lock);
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations, we cannot
	 * do it in rcu context.
	 */
	schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct sock *sock)
{
	struct smap_psock *psock = smap_psock_sk(sock);

	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON() in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	psock->save_state_change = sk->sk_state_change;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	sk->sk_state_change = smap_state_change;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	if (refcount_dec_and_test(&psock->stab->refcnt))
		sock_map_remove_complete(psock->stab);

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	int err = -EINVAL;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	stab->map.map_type = attr->map_type;
	stab->map.key_size = attr->key_size;
	stab->map.value_size = attr->value_size;
	stab->map.max_entries = attr->max_entries;
	stab->map.map_flags = attr->map_flags;
	stab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	refcount_set(&stab->refcnt, 1);
	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires, to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		smap_release_sock(sock);
	}
	rcu_read_unlock();

	if (stab->bpf_verdict)
		bpf_prog_put(stab->bpf_verdict);
	if (stab->bpf_parse)
		bpf_prog_put(stab->bpf_parse);

	if (refcount_dec_and_test(&stab->refcnt))
		sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	smap_release_sock(sock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * A psock object holds a refcnt on the sockmap it is attached to and this is
 * not decremented until after an RCU grace period and garbage collection occurs.
 * This ensures the map is not freed until psocks linked to it are removed. The
 * map link is used when the independent sock events trigger map deletion.
 *
 * Psocks may only participate in one sockmap at a time. Users that try to
 * join a single sock to multiple maps will get an error.
 *
 * Last, but not least, it is possible the socket is closed while running
 * an update on an existing psock. This will release the psock, but again
 * not until the update has completed due to rcu grace period rules.
 */
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags, u64 map_flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *verdict, *parse;
	struct smap_psock *psock = NULL;
	struct sock *old_sock, *sock;
	u32 i = *(u32 *)key;
	bool update = false;
	int err = 0;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	if (unlikely(map_flags > BPF_SOCKMAP_STRPARSER))
		return -EINVAL;

	verdict = parse = NULL;
	sock = READ_ONCE(stab->sock_map[i]);

	if (flags == BPF_EXIST || flags == BPF_ANY) {
		if (!sock && flags == BPF_EXIST) {
			return -ENOENT;
		} else if (sock && sock != skops->sk) {
			return -EINVAL;
		} else if (sock) {
			psock = smap_psock_sk(sock);
			if (unlikely(!psock))
				return -EBUSY;
			update = true;
		}
	} else if (sock && BPF_NOEXIST) {
		return -EEXIST;
	}

	/* reserve BPF programs early so we can abort easily on failures */
	if (map_flags & BPF_SOCKMAP_STRPARSER) {
		verdict = READ_ONCE(stab->bpf_verdict);
		parse = READ_ONCE(stab->bpf_parse);

		if (!verdict || !parse)
			return -ENOENT;

		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	if (!psock) {
		sock = skops->sk;
		if (rcu_dereference_sk_user_data(sock))
			return -EEXIST;
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			if (verdict)
				bpf_prog_put(verdict);
			if (parse)
				bpf_prog_put(parse);
			return PTR_ERR(psock);
		}
		psock->key = i;
		psock->stab = stab;
		refcount_inc(&stab->refcnt);
		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	if (map_flags & BPF_SOCKMAP_STRPARSER) {
		write_lock_bh(&sock->sk_callback_lock);
		if (psock->strp_enabled)
			goto start_done;
		err = smap_init_sock(psock, sock);
		if (err)
			goto out;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
start_done:
		write_unlock_bh(&sock->sk_callback_lock);
	} else if (update) {
		smap_stop_sock(psock, sock);
	}

	if (!update) {
		old_sock = xchg(&stab->sock_map[i], skops->sk);
		if (old_sock)
			smap_release_sock(old_sock);
	}

	return 0;
out:
	write_unlock_bh(&sock->sk_callback_lock);
	if (!update)
		smap_release_sock(sock);
	return err;
}

int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}
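
/* Userspace reaches sock_map_attach_prog() via bpf(BPF_PROG_ATTACH) with the
 * sockmap fd as the attach target. A minimal sketch, assuming the
 * bpf_prog_attach() wrapper from tools/lib/bpf and already loaded programs;
 * prog_fd_parse, prog_fd_verdict and map_fd are placeholders:
 *
 *	err = bpf_prog_attach(prog_fd_parse, map_fd,
 *			      BPF_SK_SKB_STREAM_PARSER, 0);
 *	if (!err)
 *		err = bpf_prog_attach(prog_fd_verdict, map_fd,
 *				      BPF_SK_SKB_STREAM_VERDICT, 0);
 */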

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	err = sock_map_ctx_update_elem(&skops, map, key,
				       flags, BPF_SOCKMAP_STRPARSER);
	fput(socket->file);
	return err;
}
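
/* From userspace the map value is simply a socket fd. A minimal sketch,
 * assuming the bpf_map_update_elem() wrapper from tools/lib/bpf; key, sock_fd
 * and map_fd are placeholders for a slot index, the fd of a TCP socket and the
 * sockmap fd:
 *
 *	int key = 0;
 *	int err = bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 *
 *	if (err)
 *		perror("bpf_map_update_elem");
 */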

const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
};

BPF_CALL_5(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags, u64, map_flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags, map_flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
	.arg5_type = ARG_ANYTHING,
};
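
/* For illustration only (not compiled here): bpf_sock_map_update() is meant to
 * be called from a BPF_PROG_TYPE_SOCK_OPS program, e.g. to add a socket to the
 * map once the connection is established. A minimal sketch, assuming a BPF-side
 * declaration that mirrors the five-argument kernel signature above; the
 * sock_map definition, section name and fixed key are hypothetical:
 *
 *	SEC("sockops")
 *	int bpf_sockmap(struct bpf_sock_ops *skops)
 *	{
 *		int key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key,
 *					    BPF_NOEXIST, BPF_SOCKMAP_STRPARSER);
 *		return 0;
 *	}
 */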