// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

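/* Return the first send_ctxt on @list, or NULL if @list is empty.
 */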
static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

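/* Allocate a send_ctxt with room for the device's maximum number of
 * Send SGEs, and DMA-map a buffer that will hold the Reply's transport
 * header. That mapping is retained for the lifetime of the ctxt and is
 * released only in svc_rdma_send_ctxts_destroy().
 */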
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	might_sleep();

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

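		/* Hold a transport reference while this Send WR is
		 * outstanding. svc_rdma_wc_send() releases it when the
		 * WR completes; the error path below releases it if
		 * posting fails.
		 */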
		svc_xprt_get(&rdma->sc_xprt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		trace_svcrdma_post_send(wr, ret);
		if (ret) {
			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
			svc_xprt_put(&rdma->sc_xprt);
			wake_up(&rdma->sc_send_wait);
		}
		break;
	}
	return ret;
}

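/* Return the number of padding bytes needed to round @len up to the
 * next XDR quad (4-byte) boundary.
 */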
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

/* Returns length of transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
	unsigned int nsegs;
	__be32 *p;

	p = rdma_resp;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p += rpcrdma_fixed_maxsz + 1;

	/* Skip Write list. */
	while (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	/* Skip Reply chunk. */
	if (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	return (unsigned long)p - (unsigned long)rdma_resp;
}

/* One Write chunk is copied from Call transport header to Reply
 * transport header. Each segment's length field is updated to
 * reflect number of bytes consumed in the segment.
 *
 * Returns number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	u32 seg_len;

	/* Write list discriminator */
	*dst++ = *src++;

	/* number of segments in this chunk */
	nsegs = be32_to_cpup(src);
	*dst++ = *src++;

	for (i = nsegs; i; i--) {
		/* segment's RDMA handle */
		*dst++ = *src++;

		/* bytes returned in this segment */
		seg_len = be32_to_cpu(*src);
		if (remaining >= seg_len) {
			/* entire segment was consumed */
			*dst = *src;
			remaining -= seg_len;
		} else {
			/* segment only partly filled */
			*dst = cpu_to_be32(remaining);
			remaining = 0;
		}
		dst++; src++;

		/* segment's RDMA offset */
		*dst++ = *src++;
		*dst++ = *src++;
	}

	return nsegs;
}

/* The client provided a Write list in the Call message. Fill in
 * the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
					   unsigned int consumed)
{
	unsigned int nsegs;
	__be32 *p, *q;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	q = wr_ch;
	while (*q != xdr_zero) {
		nsegs = xdr_encode_write_chunk(p, q, consumed);
		q += 2 + nsegs * rpcrdma_segment_maxsz;
		p += 2 + nsegs * rpcrdma_segment_maxsz;
		consumed = 0;
	}

	/* Terminate Write list */
	*p++ = xdr_zero;

	/* Reply chunk discriminator; may be replaced later */
	*p = xdr_zero;
}

/* The client provided a Reply chunk in the Call message. Fill in
 * the segments in the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
					    unsigned int consumed)
{
	__be32 *p;

	/* Find the Reply chunk in the Reply's xprt header.
	 * RPC-over-RDMA V1 replies never have a Read list.
	 */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	/* Skip past Write list */
	while (*p++ != xdr_zero)
		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

	xdr_encode_write_chunk(p, rp_ch, consumed);
}

/* Parse the RPC Call's transport header.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
				      __be32 **write, __be32 **reply)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = p;
	else
		*reply = NULL;
}

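/* DMA-map part of a page for transmission by RDMA Send, and record
 * the mapping in the next free SGE of @ctxt. Returns zero on success,
 * or -EIO if the mapping attempt fails.
 */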
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header
 *
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *ctxt,
			     unsigned int len)
{
	ctxt->sc_sges[0].length = len;
	ctxt->sc_send_wr.num_sge++;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr, len,
				      DMA_TO_DEVICE);
}

/* If the xdr_buf has more elements than the device can
 * transmit in a single RDMA Send, then the reply will
 * have to be copied into a bounce buffer.
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct xdr_buf *xdr,
				    __be32 *wr_lst)
{
	int elements;

	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!wr_lst) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}

/* The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header
 * buffer.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *ctxt,
				      struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = ctxt->sc_xprt_buf;
	dst += ctxt->sc_sges[0].length;

	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (wr_lst) {
		u32 xdrpad;

		xdrpad = xdr_padsize(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages), len);
			remaining -= len;
			dst += len;
			pageoff = 0;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	ctxt->sc_sges[0].length += xdr->len;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr,
				      ctxt->sc_sges[0].length,
				      DMA_TO_DEVICE);

	return 0;
}

/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing RPC message
 * @wr_lst: pointer to Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *ctxt,
			   struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);

	++ctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, ctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (wr_lst) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_padsize(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp,
				   __be32 *wr_lst, __be32 *rp_ch)
{
	int ret;

	if (!rp_ch) {
		ret = svc_rdma_map_reply_msg(rdma, sctxt,
					     &rqstp->rq_res, wr_lst);
		if (ret < 0)
			return ret;
	}

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
		sctxt->sc_send_wr.num_sge);
	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	__be32 *p;
	int ret;

	p = ctxt->sc_xprt_buf;
	trace_svcrdma_err_chunk(*p);
	p += 3;
	*p++ = rdma_error;
	*p = err_chunk;
	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
	if (ret) {
		svc_rdma_send_ctxt_put(rdma, ctxt);
		return ret;
	}

	return 0;
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	rdma_argp = rctxt->rc_recv_buf;
	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;
	rdma_resp = sctxt->sc_xprt_buf;

	p = rdma_resp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ch ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
				      wr_lst, rp_ch);
	if (ret < 0)
		goto err1;
	ret = 0;

out:
	rqstp->rq_xprt_ctxt = NULL;
	svc_rdma_recv_ctxt_put(rdma, rctxt);
	return ret;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	ret = 0;
	goto out;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	ret = -ENOTCONN;
	goto out;
}