// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

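/* Unlink a backchannel rpc_rqst from the transport's list of
 * allocated reqs, then destroy the rpcrdma_req that carries it.
 */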
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(req);
}

static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst;
	unsigned int i;

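	/* Allocate twice as many rpc_rqsts as the caller asked
	 * for; see the comment in xprt_rdma_bc_setup() for why.
	 */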
	for (i = 0; i < (count << 1); i++) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_req *req;
		size_t size;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req))
			return PTR_ERR(req);
		rqst = &req->rl_slot;

		rqst->rq_xprt = xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);
		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
		spin_lock(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		size = r_xprt->rx_data.inline_rsize;
		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
		if (IS_ERR(rb))
			goto out_fail;
		req->rl_sendbuf = rb;
		xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
			     min_t(size_t, size, PAGE_SIZE));
	}
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
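	/* Twice @reqs rpc_rqsts are set up below, so cap @reqs at
	 * half of the Work Requests reserved for the backward
	 * direction.
	 */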
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
	if (rc)
		goto out_free;

	r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

117/**
Chuck Lever76566772015-10-24 17:28:32 -0400118 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
119 * @serv: server endpoint
120 * @net: network namespace
121 *
122 * The "xprt" is an implied argument: it supplies the name of the
123 * backchannel transport class.
124 *
125 * Returns zero on success, negative errno on failure
126 */
127int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
128{
129 int ret;
130
131 ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
132 if (ret < 0)
133 return ret;
134 return 0;
135}
136
137/**
Chuck Lever6b26cc82016-05-02 14:40:40 -0400138 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
139 * @xprt: transport
140 *
141 * Returns maximum size, in bytes, of a backchannel message
142 */
143size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
144{
145 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
146 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
147 size_t maxmsg;
148
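	/* A backchannel RPC message must fit entirely in one
	 * inline send or receive buffer, which includes the fixed
	 * RPC-over-RDMA transport header at the front.
	 */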
	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

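	/* Reserve 28 octets, seven XDR words: the transport
	 * header's xid, vers, credits, and proc fields, plus
	 * three empty chunk lists.
	 */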
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(rqst->rq_xprt))
		goto drop_connection;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

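	/* Post a fresh batch of Receive buffers before sending,
	 * so one is available if the server immediately sends
	 * another backward direction call.
	 */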
	rpcrdma_post_recvs(r_xprt, true);
	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_disconnect_done(rqst->rq_xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

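	/* bc_pa_lock is released and re-taken around each call to
	 * rpcrdma_bc_free_rqst(), which acquires other locks while
	 * tearing down the rpcrdma_req.
	 */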
	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC: %s: freeing rqst %p (req %p)\n",
		__func__, rqst, req);

	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

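	/* Peek at the transport header: a zero-length inline
	 * decode returns the current position in the receive
	 * buffer without advancing the XDR stream. The XID is
	 * the first word.
	 */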
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, (int)size, p);
#endif

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}