Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 1 | /* |
Ka-Cheong Poon | eee2fa6 | 2018-07-23 20:51:21 -0700 | [diff] [blame] | 2 | * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the |
| 8 | * OpenIB.org BSD license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or |
| 11 | * without modification, are permitted provided that the following |
| 12 | * conditions are met: |
| 13 | * |
| 14 | * - Redistributions of source code must retain the above |
| 15 | * copyright notice, this list of conditions and the following |
| 16 | * disclaimer. |
| 17 | * |
| 18 | * - Redistributions in binary form must reproduce the above |
| 19 | * copyright notice, this list of conditions and the following |
| 20 | * disclaimer in the documentation and/or other materials |
| 21 | * provided with the distribution. |
| 22 | * |
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 30 | * SOFTWARE. |
| 31 | * |
| 32 | */ |
| 33 | #include <linux/kernel.h> |
| 34 | #include <linux/in.h> |
| 35 | #include <net/tcp.h> |
| 36 | |
Sowmini Varadhan | 0cb4396 | 2016-06-13 09:44:26 -0700 | [diff] [blame] | 37 | #include "rds_single_path.h" |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 38 | #include "rds.h" |
| 39 | #include "tcp.h" |
| 40 | |
Sowmini Varadhan | 226f7a7 | 2016-06-30 16:11:10 -0700 | [diff] [blame] | 41 | void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp) |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 42 | { |
Sowmini Varadhan | 226f7a7 | 2016-06-30 16:11:10 -0700 | [diff] [blame] | 43 | struct rds_tcp_connection *tc = cp->cp_transport_data; |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 44 | |
Christoph Hellwig | db10538 | 2020-05-28 07:12:18 +0200 | [diff] [blame] | 45 | tcp_sock_set_cork(tc->t_sock->sk, true); |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 46 | } |
| 47 | |
Sowmini Varadhan | 226f7a7 | 2016-06-30 16:11:10 -0700 | [diff] [blame] | 48 | void rds_tcp_xmit_path_complete(struct rds_conn_path *cp) |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 49 | { |
Sowmini Varadhan | 226f7a7 | 2016-06-30 16:11:10 -0700 | [diff] [blame] | 50 | struct rds_tcp_connection *tc = cp->cp_transport_data; |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 51 | |
Christoph Hellwig | db10538 | 2020-05-28 07:12:18 +0200 | [diff] [blame] | 52 | tcp_sock_set_cork(tc->t_sock->sk, false); |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 53 | } |
| 54 | |
| 55 | /* the core send_sem serializes this with other xmit and shutdown */ |
stephen hemminger | ff51bf8 | 2010-10-19 08:08:33 +0000 | [diff] [blame] | 56 | static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 57 | { |
| 58 | struct kvec vec = { |
Joshua Houghton | 5c3da57 | 2016-06-18 15:46:31 +0000 | [diff] [blame] | 59 | .iov_base = data, |
| 60 | .iov_len = len, |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 61 | }; |
Joshua Houghton | 5c3da57 | 2016-06-18 15:46:31 +0000 | [diff] [blame] | 62 | struct msghdr msg = { |
| 63 | .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, |
| 64 | }; |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 65 | |
| 66 | return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); |
| 67 | } |
| 68 | |
| 69 | /* the core send_sem serializes this with other xmit and shutdown */ |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 70 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, |
Joshua Houghton | 5c3da57 | 2016-06-18 15:46:31 +0000 | [diff] [blame] | 71 | unsigned int hdr_off, unsigned int sg, unsigned int off) |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 72 | { |
Sowmini Varadhan | 5916e2c | 2016-07-14 03:51:03 -0700 | [diff] [blame] | 73 | struct rds_conn_path *cp = rm->m_inc.i_conn_path; |
| 74 | struct rds_tcp_connection *tc = cp->cp_transport_data; |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 75 | int done = 0; |
| 76 | int ret = 0; |
Sowmini Varadhan | 76b29ef | 2015-09-30 16:54:09 -0400 | [diff] [blame] | 77 | int more; |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 78 | |
| 79 | if (hdr_off == 0) { |
| 80 | /* |
| 81 | * m_ack_seq is set to the sequence number of the last byte of |
| 82 | * header and data. see rds_tcp_is_acked(). |
| 83 | */ |
Sowmini Varadhan | b589513 | 2018-01-18 13:11:07 -0800 | [diff] [blame] | 84 | tc->t_last_sent_nxt = rds_tcp_write_seq(tc); |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 85 | rm->m_ack_seq = tc->t_last_sent_nxt + |
| 86 | sizeof(struct rds_header) + |
| 87 | be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; |
Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 88 | smp_mb__before_atomic(); |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 89 | set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags); |
| 90 | tc->t_last_expected_una = rm->m_ack_seq + 1; |
| 91 | |
Sowmini Varadhan | 315ca6d | 2016-11-16 13:29:48 -0800 | [diff] [blame] | 92 | if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) |
| 93 | rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED; |
| 94 | |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 95 | rdsdebug("rm %p tcp nxt %u ack_seq %llu\n", |
Sowmini Varadhan | b589513 | 2018-01-18 13:11:07 -0800 | [diff] [blame] | 96 | rm, rds_tcp_write_seq(tc), |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 97 | (unsigned long long)rm->m_ack_seq); |
| 98 | } |
| 99 | |
| 100 | if (hdr_off < sizeof(struct rds_header)) { |
| 101 | /* see rds_tcp_write_space() */ |
| 102 | set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags); |
| 103 | |
| 104 | ret = rds_tcp_sendmsg(tc->t_sock, |
| 105 | (void *)&rm->m_inc.i_hdr + hdr_off, |
| 106 | sizeof(rm->m_inc.i_hdr) - hdr_off); |
| 107 | if (ret < 0) |
| 108 | goto out; |
| 109 | done += ret; |
| 110 | if (hdr_off + done != sizeof(struct rds_header)) |
| 111 | goto out; |
| 112 | } |
| 113 | |
Sowmini Varadhan | 76b29ef | 2015-09-30 16:54:09 -0400 | [diff] [blame] | 114 | more = rm->data.op_nents > 1 ? (MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0; |
Andy Grover | 6c7cc6e | 2010-01-27 18:04:18 -0800 | [diff] [blame] | 115 | while (sg < rm->data.op_nents) { |
Sowmini Varadhan | 76b29ef | 2015-09-30 16:54:09 -0400 | [diff] [blame] | 116 | int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more; |
| 117 | |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 118 | ret = tc->t_sock->ops->sendpage(tc->t_sock, |
Andy Grover | 6c7cc6e | 2010-01-27 18:04:18 -0800 | [diff] [blame] | 119 | sg_page(&rm->data.op_sg[sg]), |
| 120 | rm->data.op_sg[sg].offset + off, |
| 121 | rm->data.op_sg[sg].length - off, |
Sowmini Varadhan | 76b29ef | 2015-09-30 16:54:09 -0400 | [diff] [blame] | 122 | flags); |
Andy Grover | 6c7cc6e | 2010-01-27 18:04:18 -0800 | [diff] [blame] | 123 | rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), |
| 124 | rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 125 | ret); |
| 126 | if (ret <= 0) |
| 127 | break; |
| 128 | |
| 129 | off += ret; |
| 130 | done += ret; |
Andy Grover | 6c7cc6e | 2010-01-27 18:04:18 -0800 | [diff] [blame] | 131 | if (off == rm->data.op_sg[sg].length) { |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 132 | off = 0; |
| 133 | sg++; |
| 134 | } |
Sowmini Varadhan | 76b29ef | 2015-09-30 16:54:09 -0400 | [diff] [blame] | 135 | if (sg == rm->data.op_nents - 1) |
| 136 | more = 0; |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 137 | } |
| 138 | |
| 139 | out: |
| 140 | if (ret <= 0) { |
| 141 | /* write_space will hit after EAGAIN, all else fatal */ |
| 142 | if (ret == -EAGAIN) { |
| 143 | rds_tcp_stats_inc(s_tcp_sndbuf_full); |
| 144 | ret = 0; |
| 145 | } else { |
Sowmini Varadhan | 5916e2c | 2016-07-14 03:51:03 -0700 | [diff] [blame] | 146 | /* No need to disconnect/reconnect if path_drop |
| 147 | * has already been triggered, because, e.g., of |
| 148 | * an incoming RST. |
| 149 | */ |
| 150 | if (rds_conn_path_up(cp)) { |
Ka-Cheong Poon | eee2fa6 | 2018-07-23 20:51:21 -0700 | [diff] [blame] | 151 | pr_warn("RDS/tcp: send to %pI6c on cp [%d]" |
Sowmini Varadhan | 5916e2c | 2016-07-14 03:51:03 -0700 | [diff] [blame] | 152 | "returned %d, " |
| 153 | "disconnecting and reconnecting\n", |
| 154 | &conn->c_faddr, cp->cp_index, ret); |
Sowmini Varadhan | aed20a5 | 2017-07-16 16:43:46 -0700 | [diff] [blame] | 155 | rds_conn_path_drop(cp, false); |
Sowmini Varadhan | 5916e2c | 2016-07-14 03:51:03 -0700 | [diff] [blame] | 156 | } |
Andy Grover | 7004108 | 2009-08-21 12:28:31 +0000 | [diff] [blame] | 157 | } |
| 158 | } |
| 159 | if (done == 0) |
| 160 | done = ret; |
| 161 | return done; |
| 162 | } |
| 163 | |
| 164 | /* |
| 165 | * rm->m_ack_seq is set to the tcp sequence number that corresponds to the |
| 166 | * last byte of the message, including the header. This means that the |
| 167 | * entire message has been received if rm->m_ack_seq is "before" the next |
| 168 | * unacked byte of the TCP sequence space. We have to do very careful |
| 169 | * wrapping 32bit comparisons here. |
| 170 | */ |
| 171 | static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack) |
| 172 | { |
| 173 | if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags)) |
| 174 | return 0; |
| 175 | return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0; |
| 176 | } |
| 177 | |
/*
 * write_space callback installed on the RDS TCP socket (the original
 * handler is saved in tc->t_orig_write_space and chained below).  Uses
 * the advancing snd_una value to retire fully-acked RDS messages via
 * rds_send_path_drop_acked()/rds_tcp_is_acked(), and requeues the send
 * worker when the socket has drained enough to accept more data.
 */
void rds_tcp_write_space(struct sock *sk)
{
	void (*write_space)(struct sock *sk);
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;

	/* sk_user_data is published/cleared under sk_callback_lock */
	read_lock_bh(&sk->sk_callback_lock);
	cp = sk->sk_user_data;
	if (!cp) {
		/* socket no longer attached to an RDS path; just chain to
		 * whatever write_space handler is currently installed.
		 */
		write_space = sk->sk_write_space;
		goto out;
	}

	tc = cp->cp_transport_data;
	rdsdebug("write_space for tc %p\n", tc);
	write_space = tc->t_orig_write_space;
	rds_tcp_stats_inc(s_tcp_write_space_calls);

	rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
	tc->t_last_seen_una = rds_tcp_snd_una(tc);
	/* retire every message whose m_ack_seq the peer has now acked */
	rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);

	/* Only kick the send worker when at least half the send buffer is
	 * free, and (under RCU) the connection is not being torn down.
	 */
	rcu_read_lock();
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
	    !rds_destroy_pending(cp->cp_conn))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
	rcu_read_unlock();

out:
	read_unlock_bh(&sk->sk_callback_lock);

	/*
	 * write_space is only called when data leaves tcp's send queue if
	 * SOCK_NOSPACE is set.  We set SOCK_NOSPACE every time we put
	 * data in tcp's send queue because we use write_space to parse the
	 * sequence numbers and notice that rds messages have been fully
	 * received.
	 *
	 * tcp's write_space clears SOCK_NOSPACE if the send queue has more
	 * than a certain amount of space. So we need to set it again *after*
	 * we call tcp's write_space or else we might only get called on the
	 * first of a series of incoming tcp acks.
	 */
	write_space(sk);

	if (sk->sk_socket)
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}