// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
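/* Changing the #undef above to a #define enables the pr_info()
 * dump of each incoming backchannel call in
 * rpcrdma_bc_receive_call() below.
 */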

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

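	/* Advertise half of the backward WR budget as the
	 * backchannel credit limit. The even split between
	 * Receives (incoming calls) and Sends (outgoing replies)
	 * is an assumption based on the shift below; note that
	 * @reqs feeds only the trace point.
	 */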
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
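	/* A backchannel message must fit in a single inline
	 * transfer and in one page. For example, assuming 4KB
	 * inline thresholds and a 4KB PAGE_SIZE, the maximum
	 * payload works out to 4096 - 28 (RPCRDMA_HDRLEN_MIN,
	 * seven XDR words of transport header) = 4068 bytes.
	 */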
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

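/* Report how many backchannel slots the RPC client may use at
 * once; this matches the credit value that xprt_rdma_bc_setup()
 * records, i.e. half the backward WR budget.
 */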
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

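	/* Reserve seven XDR words (28 bytes) for the RPC/RDMA
	 * transport header: XID, version, credits, and the
	 * rdma_msg procedure, followed by three xdr_zero words
	 * marking the read, write, and reply chunk lists as
	 * empty (standard RPC/RDMA framing).
	 */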
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

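	/* A backchannel reply consumes a congestion-control
	 * credit, just like a forward RPC call; %-EBADSLT tells
	 * the caller that no credit was available.
	 */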
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

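	/* Post the marshaled reply as an RDMA Send. A posting
	 * failure is handled like a lost connection: close the
	 * transport so the caller reconnects and tries again.
	 */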
	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
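		/* Drop bc_pa_lock around the destroy: freeing the
		 * req releases buffer resources and is presumably
		 * not safe to do under a spinlock.
		 */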
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

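	/* Return the rep that carried this callback to the
	 * receive buffer pool and detach it from the req before
	 * the rqst goes back on bc_pa_list for reuse.
	 */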
	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

	size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;

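	/* The rqst is embedded in the rpcrdma_req (rl_slot), so
	 * no separate allocation is needed: mark the slot in use
	 * and point its send buffer at the req's inline buffer.
	 */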
	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

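	/* The transport header has already been decoded from
	 * rr_stream. A zero-length inline decode peeks at the
	 * current position: p points to the start of the
	 * backchannel RPC call, and size is the number of bytes
	 * of it remaining in the receive buffer.
	 */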
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}