// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

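/* Move up to @apply_bytes of @msg onto the ingress queue of @sk's psock,
 * charging @sk's receive memory for the transferred scatterlist entries.
 * Used when a msg verdict program redirects data to ingress.
 */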
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

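/* Push up to @apply_bytes of @msg out on the wire. With a TLS TX ULP
 * present, the locked sendpage path is used with MSG_SENDPAGE_NOPOLICY
 * set so the BPF policy is not re-run; otherwise plain
 * do_tcp_sendpages() is used. Caller must hold the socket lock.
 */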
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp) {
			flags |= MSG_SENDPAGE_NOPOLICY;
			ret = kernel_sendpage_locked(sk,
						     page, off, size, flags);
		} else {
			ret = do_tcp_sendpages(sk, page, off, size, flags);
		}

		if (ret <= 0)
			return ret;
		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

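/* Entry point for sendmsg redirects: depending on the verdict recorded
 * in @msg, either move the data onto the target socket's psock ingress
 * queue or push it out on the wire. Frees @msg if the target has no
 * psock anymore.
 */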
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
			  u32 bytes, int flags)
{
	bool ingress = sk_msg_to_ingress(msg);
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock)) {
		sk_msg_free(sk, msg);
		return 0;
	}
	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
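/* ->stream_memory_read() replacement: data is also considered readable
 * when the psock ingress queue is non-empty, not only sk_receive_queue.
 */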
static bool tcp_bpf_stream_read(const struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}

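/* ->recvmsg() replacement: consume from the psock ingress queue first,
 * falling back to tcp_recvmsg() when no psock data is, or becomes,
 * available.
 */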
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		int data, err = 0;
		long timeo;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		}
		if (err) {
			ret = err;
			goto out;
		}
		copied = -EAGAIN;
	}
	ret = copied;
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

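/* Run the BPF msg verdict program (unless a verdict is already being
 * applied) and carry out its decision: __SK_PASS sends on @sk itself,
 * __SK_REDIRECT hands the data to another socket, __SK_DROP frees it.
 * Also handles corking and apply_bytes bookkeeping across calls.
 */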
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg);
	struct sock *sk_redir;
	u32 tosend, delta = 0;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track delta in msg size to add/subtract it on SK_DROP from
		 * returned to user copied size. This ensures user doesn't
		 * get a positive return code with msg_cut_data and SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);
		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length)
			goto more_data;
	}
	return ret;
}

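/* ->sendmsg() replacement: copy user data into an sk_msg (respecting
 * cork_bytes) and feed it through the verdict path above. Falls back
 * to tcp_sendmsg() if the psock has disappeared.
 */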
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal do_tcp_sendpages() flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

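/* ->sendpage() replacement: add the page to an sk_msg (respecting
 * cork_bytes) and run it through the verdict path. Falls back to
 * tcp_sendpage() if the psock has disappeared.
 */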
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

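/* tcp_bpf_prots is indexed by address family and by config: TCP_BPF_BASE
 * overrides unhash/close/recvmsg/stream_memory_read, while TCP_BPF_TX
 * additionally overrides the transmit paths and is selected when a msg
 * parser program is attached.
 */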
enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE] = *base;
	prot[TCP_BPF_BASE].unhash = sock_map_unhash;
	prot[TCP_BPF_BASE].close = sock_map_close;
	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;

	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
core_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

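/* Swap in (or, with @restore set, swap back out) the tcp_bpf proto for
 * @sk. Installation refuses sockets that already have a ULP; restore
 * goes through tcp_update_ulp() when a ULP is present so the TLS proto
 * state is kept intact.
 */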
int tcp_bpf_update_proto(struct sock *sk, bool restore)
{
	struct sk_psock *psock = sk_psock(sk);
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			WRITE_ONCE(sk->sk_prot, psock->sk_proto);
		}
		return 0;
	}

	if (inet_csk_has_ulp(sk))
		return -EINVAL;

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	WRITE_ONCE(sk->sk_prot, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	struct proto *prot = newsk->sk_prot;

	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */