/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA (server-side).
 */

#include <linux/sunrpc/svc_rdma.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

#undef SVCRDMA_BACKCHANNEL_DEBUG
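
/* This is the recv path for replies to backward-direction RPC
 * calls. Match the reply to a waiting rpc_rqst by XID, copy the
 * RPC message into the rqst's receive buffer, update the credit
 * grant, and complete the rqst.
 *
 * Returns zero on success, or -EAGAIN if the reply is too short,
 * does not match a waiting call, or does not fit in the receive
 * buffer.
 */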
int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp,
			     struct xdr_buf *rcvbuf)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct kvec *dst, *src = &rcvbuf->head[0];
	struct rpc_rqst *req;
	unsigned long cwnd;
	u32 credits;
	size_t len;
	__be32 xid;
	__be32 *p;
	int ret;

	p = (__be32 *)src->iov_base;
	len = src->iov_len;
	xid = rmsgp->rm_xid;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: xid=%08x, length=%zu\n",
		__func__, be32_to_cpu(xid), len);
	pr_info("%s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, rmsgp);
	pr_info("%s:      RPC: %*ph\n",
		__func__, (int)len, p);
#endif

	ret = -EAGAIN;
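	/* A well-formed RPC reply message is at least 24 octets:
	 * XID, message direction, reply_stat, an empty verifier
	 * (flavor and length), and accept_stat, four octets each.
	 */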
	if (src->iov_len < 24)
		goto out_shortreply;

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_notfound;

	dst = &req->rq_private_buf.head[0];
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	if (dst->iov_len < len)
		goto out_unlock;
	memcpy(dst->iov_base, p, len);

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
		credits = r_xprt->rx_buf.rb_bc_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(req->rq_task);

	ret = 0;
	xprt_complete_rqst(req->rq_task, rcvbuf->len);
	rcvbuf->len = 0;

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
out:
	return ret;

out_shortreply:
	dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
		xprt, src->iov_len);
	goto out;

out_notfound:
	dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
		xprt, be32_to_cpu(xid));

	goto out_unlock;
}

/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_reply, but takes an rpc_rqst
 * instead, does not support chunks, and avoids blocking memory
 * allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
			      struct rpc_rqst *rqst)
{
	struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;
	struct ib_send_wr send_wr;
	int ret;

	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
	if (ret)
		goto out_err;
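
	/* The marshaled request fits in a single page (see
	 * xprt_rdma_bc_allocate), so a single DMA-mapped SGE
	 * is enough to post the Send.
	 */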
	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
	if (ret)
		goto out_err;

	ctxt = svc_rdma_get_context(rdma);
	ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
	ctxt->count = 1;

	ctxt->direction = DMA_TO_DEVICE;
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = sndbuf->len;
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
			    sndbuf->len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
		ret = -EIO;
		goto out_unmap;
	}
	svc_rdma_count_mappings(rdma, ctxt);

	memset(&send_wr, 0, sizeof(send_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret) {
		ret = -EIO;
		goto out_unmap;
	}

out_err:
	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: %s returns %d\n", __func__, ret);
	return ret;

out_unmap:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	goto out_err;
}

/* Server-side transport endpoint wants a whole page for its send
 * buffer. The client RPC code constructs the RPC header in this
 * buffer before it invokes ->send_request.
 */
static int
xprt_rdma_bc_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize;
	struct page *page;

	if (size > PAGE_SIZE) {
		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
			  size);
		return -EINVAL;
	}

	/* svc_rdma_sendto releases this page */
	page = alloc_page(RPCRDMA_DEF_GFP);
	if (!page)
		return -ENOMEM;
	rqst->rq_buffer = page_address(page);

	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
	if (!rqst->rq_rbuffer) {
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}
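
/* The send buffer page is released by svc_rdma_sendto (see
 * xprt_rdma_bc_allocate above); only the receive buffer needs
 * to be freed here.
 */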
static void
xprt_rdma_bc_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;

	kfree(rqst->rq_rbuffer);
}

static int
rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *p;
	int rc;

	/* Space in the send buffer for an RPC/RDMA header is reserved
	 * via xprt->tsh_size. Build a chunk-less RPC/RDMA header in
	 * that space: XID, version, the advertised credit limit,
	 * message type rdma_msg, and three empty chunk lists.
	 */
	p = rqst->rq_buffer;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p   = xdr_zero;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif

	rc = svc_rdma_bc_sendto(rdma, rqst);
	if (rc)
		goto drop_connection;
	return rc;

drop_connection:
	dprintk("svcrdma: failed to send bc call\n");
	xprt_disconnect_done(xprt);
	return -ENOTCONN;
}

/* Send an RPC call on the passive end of a transport
 * connection.
 */
static int
xprt_rdma_bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
	struct svcxprt_rdma *rdma;
	int ret;

	dprintk("svcrdma: sending bc call with xid: %08x\n",
		be32_to_cpu(rqst->rq_xid));
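
	/* If the transport is busy, sleep on xpt_bc_pending and try
	 * the lock once more; if the second trylock succeeds, remove
	 * this task from the wait queue before proceeding.
	 */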
	if (!mutex_trylock(&sxprt->xpt_mutex)) {
		rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&sxprt->xpt_mutex))
			return -EAGAIN;
		rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
	}

	ret = -ENOTCONN;
	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
	if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
		ret = rpcrdma_bc_send_request(rdma, rqst);

	mutex_unlock(&sxprt->xpt_mutex);

	if (ret < 0)
		return ret;
	return 0;
}
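
/* The backchannel rides on the forward channel's connection,
 * so there is no connection state of its own to tear down.
 */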
static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
}
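
/* Free the rpc_xprt and drop the module reference taken in
 * xprt_setup_rdma_bc.
 */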
static void
xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);

	xprt_free(xprt);
	module_put(THIS_MODULE);
}

static struct rpc_xprt_ops xprt_rdma_bc_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,
	.buf_alloc		= xprt_rdma_bc_allocate,
	.buf_free		= xprt_rdma_bc_free,
	.send_request		= xprt_rdma_bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xprt_rdma_bc_close,
	.destroy		= xprt_rdma_bc_put,
	.print_stats		= xprt_rdma_print_stats
};

static const struct rpc_timeout xprt_rdma_bc_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/* It shouldn't matter if the number of backchannel session slots
 * doesn't match the number of RPC/RDMA credits. That just means
 * one or the other will have extra slots that aren't used.
 */
static struct rpc_xprt *
xprt_setup_rdma_bc(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC:       %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new_xprt),
			  RPCRDMA_MAX_BC_REQUESTS,
			  RPCRDMA_MAX_BC_REQUESTS);
	if (!xprt) {
		dprintk("RPC:       %s: couldn't allocate rpc_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_bc_timeout;
	xprt_set_bound(xprt);
	xprt_set_connected(xprt);
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
	xprt->tsh_size = RPCRDMA_HDRLEN_MIN / sizeof(__be32);
	xprt->ops = &xprt_rdma_bc_procs;

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	xprt_rdma_format_addresses(xprt, (struct sockaddr *)&xprt->addr);
	xprt->resvport = 0;

	xprt->max_payload = xprt_rdma_max_inline_read;

	new_xprt = rpcx_to_rdmax(xprt);
	new_xprt->rx_buf.rb_bc_max_requests = xprt->max_reqs;
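
	/* The svc_xprt and the backchannel rpc_xprt point at each
	 * other; take a reference for the svc side's xpt_bc_xprt
	 * pointer.
	 */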
	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;

	if (!try_module_get(THIS_MODULE))
		goto out_fail;

	/* Final put for backchannel xprt is in __svc_rdma_free */
	xprt_get(xprt);
	return xprt;

out_fail:
	xprt_rdma_free_addresses(xprt);
	args->bc_xprt->xpt_bc_xprt = NULL;
	args->bc_xprt->xpt_bc_xps = NULL;
	xprt_put(xprt);
	xprt_free(xprt);
	return ERR_PTR(-EINVAL);
}
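
/* The backchannel transport class, looked up by its
 * XPRT_TRANSPORT_BC_RDMA ident when the server side creates a
 * backward-direction rpc_xprt.
 */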
struct xprt_class xprt_rdma_bc = {
	.list			= LIST_HEAD_INIT(xprt_rdma_bc.list),
	.name			= "rdma backchannel",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_BC_RDMA,
	.setup			= xprt_setup_rdma_bc,
};