// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing the
 * connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

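/* DMA unmap an MR's scatterlist, but only if it is still mapped.
 * mr_device records the device used at map time and doubles as the
 * "is mapped" flag; it is cleared here so a second call is a no-op.
 */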
static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	if (mr->mr_device) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
				mr->mr_dir);
		mr->mr_device = NULL;
	}
}

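/* A flushed or failed MR is not safe to reuse. Unmap it, unlink it
 * from the transport's MR list under rb_lock, and release its
 * hardware resources. A replacement MR is created on demand later.
 */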
static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	frwr_mr_unmap(r_xprt, mr);

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

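/* Release an MR that completed normally: DMA unmap it and return it
 * to its rpcrdma_req's free list so it can be reused by a later RPC.
 */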
static void frwr_mr_put(struct rpcrdma_mr *mr)
{
	frwr_mr_unmap(mr->mr_xprt, mr);

	/* The MR is returned to the req's MR free list instead
	 * of to the xprt's MR free list. No spinlock is needed.
	 */
	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->frwr.fr_mr = frmr;
	mr->mr_device = NULL;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}


/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}
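	/* Worked example with hypothetical values: if RPCRDMA_MAX_DATA_SEGS
	 * were 64 and re_max_fr_depth were 16, delta would start at 48 and
	 * the loop would run three times, leaving depth = 7 + 6 = 13 Work
	 * Requests per RPC.
	 */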

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		sg_set_page(&mr->mr_sg[i], seg->mr_page,
			    seg->mr_len, seg->mr_offset);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && seg->mr_offset) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;
	mr->mr_device = ep->re_id->device;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

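	/* Plant the RPC's XID in the upper 32 bits of the MR's iova so
	 * the offset that goes on the wire helps match the segment to
	 * its RPC, then update the low-order key byte of the rkey before
	 * advertising the new registration.
	 */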
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid);
	/* The MR will get recycled when the associated req is retransmitted */

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

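/* Set up the completion ID reported by the frwr_wc_* tracepoints:
 * the Send CQ's resource ID plus the MR's own resource ID identify
 * this completion in the trace log.
 */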
static void frwr_cid_init(struct rpcrdma_ep *ep,
			  struct rpcrdma_frwr *frwr)
{
	struct rpc_rdma_cid *cid = &frwr->fr_cid;

	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = frwr->fr_mr->res.id;
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr_cid_init(ep, frwr);
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	return ib_post_send(ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			frwr_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

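/* On a successful completion, release the MR for reuse; on a flushed
 * or failed completion the MR's state is unknown, so recycle it instead.
 */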
static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, &frwr->fr_cid);
	frwr_mr_done(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid);
	frwr_mr_done(wc, mr);
	complete(&frwr->fr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		frwr_cid_init(ep, frwr);
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv_err(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid);
	frwr_mr_done(wc, mr);

	/* Ensure @rep is generated before frwr_mr_done */
	smp_rmb();
	rpcrdma_complete_rqst(rep);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		frwr_cid_init(ep, frwr);
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv_err(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}