// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

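/* Copy data queued on the psock ingress list into the user's iov.
 * With MSG_PEEK the queue is left untouched; otherwise consumed
 * bytes are uncharged from the socket (unless backed by an skb,
 * which carries its own accounting) and fully drained messages are
 * freed. Returns the number of bytes copied, or -EFAULT if nothing
 * could be copied at all. Callers are expected to hold the socket
 * lock (see tcp_bpf_recvmsg() below).
 */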
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
		      struct msghdr *msg, int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = list_first_entry_or_null(&psock->ingress_msg,
					  struct sk_msg, list);

	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Don't optimize the peek case: if
				 * copy_page_to_iter() didn't copy the
				 * entire length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			if (msg_rx == list_last_entry(&psock->ingress_msg,
						      struct sk_msg, list))
				break;
			msg_rx = list_next_entry(msg_rx, list);
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			list_del(&msg_rx->list);
			if (msg_rx->skb)
				consume_skb(msg_rx->skb);
			kfree(msg_rx);
		}
		msg_rx = list_first_entry_or_null(&psock->ingress_msg,
						  struct sk_msg, list);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);

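/* Redirect ingress: move up to @apply_bytes from @msg onto @psock's
 * own ingress queue. Transferred scatterlist entries are handed over
 * to a freshly allocated sk_msg, charged against @sk, and the
 * receive side is woken via sk_psock_data_ready(). Returns 0 on
 * success, or -ENOMEM if nothing could be allocated or charged.
 */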
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

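/* Transmit up to @apply_bytes from @msg on the TCP socket. When a
 * TLS TX ULP context is present the data is pushed through
 * kernel_sendpage_locked() with MSG_SENDPAGE_NOPOLICY so it is not
 * run through the BPF policy a second time; otherwise
 * do_tcp_sendpages() is used. Fully sent elements drop their page
 * reference. The caller must hold the socket lock.
 */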
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp) {
			flags |= MSG_SENDPAGE_NOPOLICY;
			ret = kernel_sendpage_locked(sk,
						     page, off, size, flags);
		} else {
			ret = do_tcp_sendpages(sk, page, off, size, flags);
		}

		if (ret <= 0)
			return ret;
		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

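/* Variant of tcp_bpf_push() for callers that do not already hold the
 * socket lock.
 */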
static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

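/* Entry point for data redirected here by another socket's verdict
 * program: queue @msg on this socket's ingress list or push it out
 * on the wire, depending on whether the program requested
 * BPF_F_INGRESS.
 */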
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
			  u32 bytes, int flags)
{
	bool ingress = sk_msg_to_ingress(msg);
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock)) {
		sk_msg_free(sk, msg);
		return 0;
	}
	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
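/* ->stream_memory_read() override: report the socket as readable
 * when the psock ingress list is non-empty, so that poll/select see
 * BPF-redirected data that never lands on sk_receive_queue.
 */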
static bool tcp_bpf_stream_read(const struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}

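/* Wait until data shows up on either the psock ingress list or the
 * regular receive queue. Returns nonzero if there is data or the
 * socket is shut down for receive, zero if the timeout expired.
 */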
static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
			     int flags, long timeo, int *err)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

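/* ->recvmsg() override. Data queued by the verdict program on the
 * psock ingress list is consumed first; when there is none, fall
 * back to tcp_recvmsg() for whatever sits on sk_receive_queue,
 * sleeping for new data if the socket is blocking.
 */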
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		int data, err = 0;
		long timeo;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		}
		if (err) {
			ret = err;
			goto out;
		}
		copied = -EAGAIN;
	}
	ret = copied;
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

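/* Run the BPF msg verdict program over @msg, honouring any cork and
 * apply_bytes state, and carry out the resulting action: push to the
 * TCP stack on SK_PASS, hand off to another socket on SK_REDIRECT,
 * or free the data and fail with -EACCES on SK_DROP. *copied is
 * adjusted to match what should be reported to userspace.
 */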
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg);
	struct sock *sk_redir;
	u32 tosend, delta = 0;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track the delta in msg size so that it can be
		 * subtracted from the copied count returned to the
		 * user on SK_DROP. This ensures the user doesn't get
		 * a positive return code when the program cut data
		 * (msg_cut_data) and returned an SK_DROP verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);
		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length)
			goto more_data;
	}
	return ret;
}

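/* ->sendmsg() override. Copy user data into an sk_msg scatterlist
 * (accumulating into psock->cork while cork_bytes are outstanding)
 * and feed it to tcp_bpf_send_verdict() once enough has been
 * gathered.
 */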
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal do_tcp_sendpages() flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

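/* ->sendpage() override: reference the page in an sk_msg without
 * copying, then run it through the verdict program, mirroring the
 * corking logic of tcp_bpf_sendmsg().
 */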
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

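/* The proto override table below is indexed by address family and by
 * whether a BPF msg parser is attached; only in the latter case are
 * the TX callbacks (sendmsg/sendpage) overridden as well.
 */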
enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

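/* Fill one row of tcp_bpf_prots from @base: the BASE config replaces
 * only the receive and teardown callbacks, while TX additionally
 * replaces sendmsg and sendpage.
 */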
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE] = *base;
	prot[TCP_BPF_BASE].unhash = sock_map_unhash;
	prot[TCP_BPF_BASE].close = sock_map_close;
	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;

	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
}

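/* The IPv6 proto ops are not known at build time (the ipv6 stack may
 * be modular), so the IPv6 row is rebuilt lazily whenever the saved
 * ops pointer goes stale, under tcpv6_prot_lock.
 */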
static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

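/* tcp_prot for IPv4 is always built in, so its row can be set up
 * once at boot.
 */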
static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
core_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

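/* Select the proto override matching the socket's address family and
 * psock configuration. For IPv6, the socket's current proto ops are
 * validated first (see tcp_bpf_assert_proto_ops()) and the override
 * row is rebuilt if needed.
 */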
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return ERR_PTR(-EINVAL);

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	return &tcp_bpf_prots[family][config];
}

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	struct proto *prot = newsk->sk_prot;

	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */