/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached, the sock object may only be used for
 * sock redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs, the update will return an EBUSY
 * error.
 *
 * For reference, this map is similar to the devmap used in the XDP
 * context, so reviewing the two together may be useful. For an example
 * please review ./samples/bpf/sockmap/.
 */
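
/* For orientation, a minimal sketch of the BPF side (illustration only,
 * not part of this file; section names and helper signatures vary by
 * kernel/libbpf version, and the map definition below is hypothetical).
 * A parser program returns the length of the next message and a verdict
 * program picks a destination socket with bpf_sk_redirect_map():
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type = BPF_MAP_TYPE_SOCKMAP,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(int),
 *		.max_entries = 20,
 *	};
 *
 *	SEC("sk_skb1")
 *	int bpf_parser(struct __sk_buff *skb)
 *	{
 *		return skb->len;	// treat each skb as one message
 *	}
 *
 *	SEC("sk_skb2")
 *	int bpf_verdict(struct __sk_buff *skb)
 *	{
 *		// redirect everything to the socket stored at key 0
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */
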
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
#include <net/tcp.h>

#define SOCK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
};

struct smap_psock {
	struct rcu_head rcu;
	/* refcnt is used inside sk_callback_lock */
	u32 refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	struct proto *sk_proto;
	void (*save_close)(struct sock *sk, long timeout);
	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static struct proto tcp_bpf_proto;
static int bpf_tcp_init(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (unlikely(psock->sk_proto)) {
		rcu_read_unlock();
		return -EBUSY;
	}

	psock->save_close = sk->sk_prot->close;
	psock->sk_proto = sk->sk_prot;
	sk->sk_prot = &tcp_bpf_proto;
	rcu_read_unlock();
	return 0;
}

static void bpf_tcp_release(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);

	if (likely(psock)) {
		sk->sk_prot = psock->sk_proto;
		psock->sk_proto = NULL;
	}
	rcu_read_unlock();
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);

static void bpf_tcp_close(struct sock *sk, long timeout)
{
	void (*close_fun)(struct sock *sk, long timeout);
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;
	struct sock *osk;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		return sk->sk_prot->close(sk, timeout);
	}

	/* The psock may be destroyed anytime after exiting the RCU critical
	 * section, so by the time we use close_fun the psock may no longer
	 * be valid. However, bpf_tcp_close is called with the sock lock
	 * held so the close hook and sk are still valid.
	 */
	close_fun = psock->save_close;

	write_lock_bh(&sk->sk_callback_lock);
	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		osk = cmpxchg(e->entry, sk, NULL);
		if (osk == sk) {
			list_del(&e->list);
			smap_release_sock(psock, sk);
		}
	}
	write_unlock_bh(&sk->sk_callback_lock);
	rcu_read_unlock();
	close_fun(sk, timeout);
}

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
};

static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
	.name = "bpf_tcp",
	.uid = TCP_ULP_BPF,
	.user_visible = false,
	.owner = NULL,
	.init = bpf_tcp_init,
	.release = bpf_tcp_release,
};

static int bpf_tcp_ulp_register(void)
{
	tcp_bpf_proto = tcp_prot;
	tcp_bpf_proto.close = bpf_tcp_close;
	return tcp_register_ulp(&bpf_tcp_ulp_ops);
}

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return __SK_DROP;

	skb_orphan(skb);
	/* We need to ensure that BPF metadata for maps is also cleared
	 * when we orphan the skb so that we don't have the possibility
	 * to reference a stale map.
	 */
	TCP_SKB_CB(skb)->bpf.map = NULL;
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	preempt_disable();
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	preempt_enable();
	skb->sk = NULL;

	/* Moving return codes from UAPI namespace into internal namespace */
	return rc == SK_PASS ?
		(TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
		__SK_DROP;
}

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk;
	int rc;

	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case __SK_REDIRECT:
		sk = do_sk_redirect_map(skb);
		if (likely(sk)) {
			struct smap_psock *peer = smap_psock_sk(sk);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   !sock_flag(sk, SOCK_DEAD) &&
				   sock_writeable(sk))) {
				skb_set_owner_w(skb, sk);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case __SK_DROP:
	default:
		kfree_skb(skb);
	}
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap, so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we cannot
	 * do it in RCU context.
	 */
	schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	psock->refcnt--;
	if (psock->refcnt)
		return;

	tcp_cleanup_ulp(sock);
	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can
	 * do this because strparser clones the skb before handing it to
	 * an upper layer, meaning skb_orphan has been called. We NULL sk
	 * on the way out to ensure we don't trigger a BUG_ON() in skb/sk
	 * operations later and because we are not charging the memory of
	 * this skb to any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	psock->refcnt = 1;

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	u64 cost;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	err = bpf_tcp_ulp_register();
	if (err && err != -EEXIST)
		return ERR_PTR(err);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	err = -EINVAL;
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	err = -ENOMEM;
	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
{
	struct smap_psock_map_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		if (e->entry == entry) {
			list_del(&e->list);
			break;
		}
	}
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates
	 * and data ready callbacks that reference the psock from
	 * sk_user_data. Also psock worker threads are still in-flight, so
	 * smap_release_sock will only free the psock after cancel_sync on
	 * the worker threads and a grace period expires, to ensure the
	 * psock is really safe to remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		write_lock_bh(&sock->sk_callback_lock);
		psock = smap_psock_sk(sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_remove(psock, &stab->sock_map[i]);
			smap_release_sock(psock, sock);
		}
		write_unlock_bh(&sock->sk_callback_lock);
	}
	rcu_read_unlock();

	sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);
	if (!psock)
		goto out;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
out:
	write_unlock_bh(&sock->sk_callback_lock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This prevents two updates from modifying the user
 * data in sock at the same time; the lock is required anyway for modifying
 * callbacks, we simply increase its scope slightly.
 *
 * Rules to follow:
 * - psock must always be read inside an RCU critical section
 * - sk_user_data must only be modified inside sk_callback_lock and read
 *   inside an RCU critical section.
 * - psock->maps list must only be read & modified inside sk_callback_lock
 * - sock_map must use READ_ONCE and (cmp)xchg operations
 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock_map_entry *e = NULL;
	struct bpf_prog *verdict, *parse;
	struct sock *osock, *sock;
	struct smap_psock *psock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	sock = READ_ONCE(stab->sock_map[i]);
	if (flags == BPF_EXIST && !sock)
		return -ENOENT;
	else if (flags == BPF_NOEXIST && sock)
		return -EEXIST;

	sock = skops->sk;

	/* 1. If the sock map has BPF programs, those will be inherited by
	 * the sock being added. If the sock is already attached to BPF
	 * programs this results in an error.
	 */
	verdict = READ_ONCE(stab->bpf_verdict);
	parse = READ_ONCE(stab->bpf_parse);

	if (parse && verdict) {
		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case, abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if psock exists and has
	 * already inherited programs. This would create confusion on
	 * which parser/verdict program is running. If no psock exists,
	 * create one. Inside sk_callback_lock to ensure a concurrent
	 * create doesn't update user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		psock->refcnt++;
	} else {
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
		if (err)
			goto out_progs;

		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e) {
		err = -ENOMEM;
		goto out_progs;
	}
	e->entry = &stab->sock_map[i];

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
	}

	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock, assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs, if
	 * old_sock has a strp we can stop it.
	 */
	list_add_tail(&e->list, &psock->maps);
	write_unlock_bh(&sock->sk_callback_lock);

	osock = xchg(&stab->sock_map[i], sock);
	if (osock) {
		struct smap_psock *opsock = smap_psock_sk(osock);

		write_lock_bh(&osock->sk_callback_lock);
		if (osock != sock && parse)
			smap_stop_sock(opsock, osock);
		smap_list_remove(opsock, &stab->sock_map[i]);
		smap_release_sock(opsock, osock);
		write_unlock_bh(&osock->sk_callback_lock);
	}
	return 0;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (verdict)
		bpf_prog_put(verdict);
	if (parse)
		bpf_prog_put(parse);
	write_unlock_bh(&sock->sk_callback_lock);
	kfree(e);
	return err;
}

int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
		return -EINVAL;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}

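/* A rough sketch of how user space wires programs to a sockmap, for
 * illustration only (bpf_prog_attach() here is the libbpf helper used in
 * the samples; the fds are hypothetical):
 *
 *	bpf_prog_attach(parse_prog_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 */
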
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP) {
		fput(socket->file);
		return -EOPNOTSUPP;
	}

	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}

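/* From user space the map value is a socket fd, so adding a connected TCP
 * socket looks roughly like the sketch below (illustration only; "map_fd"
 * and the missing error handling are left to the caller):
 *
 *	int key = 0, sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	// ... connect() or accept() the socket, then place it in slot 0
 *	bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */
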
static void sock_map_release(struct bpf_map *map, struct file *map_file)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	orig = xchg(&stab->bpf_parse, NULL);
	if (orig)
		bpf_prog_put(orig);
	orig = xchg(&stab->bpf_verdict, NULL);
	if (orig)
		bpf_prog_put(orig);
}

const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_release = sock_map_release,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};