// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
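/* When RPCRDMA_BACKCHANNEL_DEBUG is defined here instead of undefined,
 * rpcrdma_bc_receive_call() logs the XID and a hex dump of each
 * incoming backchannel call.
 */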

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

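	/* The credit value advertised to the server is derived from
	 * RPCRDMA_BACKWARD_WRS, the Work Requests reserved for backward
	 * direction operation; the @reqs argument is recorded only by
	 * the trace point below.
	 */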
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	size_t maxmsg;

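	/* For example (illustrative values): with 4096-byte inline
	 * thresholds and a 4096-byte PAGE_SIZE, the result is
	 * 4096 - RPCRDMA_HDRLEN_MIN = 4096 - 28 = 4068 bytes. A
	 * backward direction message must fit entirely inline.
	 */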
	maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

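	/* The transport header of a backward direction reply is seven
	 * 32-bit XDR words (28 octets, RPCRDMA_HDRLEN_MIN): the XID,
	 * the RPC/RDMA version, a credit value, the rdma_msg procedure,
	 * and three empty chunk lists (Read, Write, and Reply).
	 */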
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

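	/* If posting the Send WR fails, the transport is closed so the
	 * caller can reconnect and resend, matching the -ENOTCONN
	 * contract documented above.
	 */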
	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

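	/* bc_pa_lock is dropped around rpcrdma_req_destroy() so each
	 * request is torn down without holding the spinlock; the lock
	 * is re-acquired before the next list entry is examined.
	 */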
	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

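/* Backchannel rqsts are not preallocated by xprt_rdma_bc_setup();
 * they are created here on demand, capped at RPCRDMA_BACKWARD_WRS
 * outstanding allocations, and recycled through xprt->bc_pa_list
 * when xprt_rdma_bc_free_rqst() returns them.
 */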
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

	size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

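	/* The callback arguments are left in place: the head iovec is
	 * pointed directly at the transport's receive buffer, so the
	 * call data is not copied before the request is queued.
	 */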
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}