/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR), sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not by one whose
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:     The MR was not in use before the QP entered ERROR state.
 *
 * VALID:       The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:  The MR was being registered when the QP entered ERROR
 *              state, and the pending WR was flushed.
 *
 * FLUSHED_LI:  The MR was being invalidated when the QP entered ERROR
 *              state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

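/* frwr_is_supported - Check whether the device can do FRWR
 *
 * FRWR requires the fast registration verbs: the device must
 * advertise IB_DEVICE_MEM_MGT_EXTENSIONS and a non-zero fast
 * registration page list depth.
 */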
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                goto out_not_supported;
        if (attrs->max_fast_reg_page_list_len == 0)
                goto out_not_supported;
        return true;

out_not_supported:
        pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
                ia->ri_device->name);
        return false;
}

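/* Allocate the resources that back one rpcrdma_mw: an ib_mr deep
 * enough to register ri_max_frmr_depth pages, and a scatterlist of
 * the same depth for DMA-mapping the payload.
 */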
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
        unsigned int depth = ia->ri_max_frmr_depth;
        struct rpcrdma_frmr *f = &r->frmr;
        int rc;

        f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
        if (IS_ERR(f->fr_mr))
                goto out_mr_err;

        r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
        if (!r->mw_sg)
                goto out_list_err;

        sg_init_table(r->mw_sg, depth);
        init_completion(&f->fr_linv_done);
        return 0;

out_mr_err:
        rc = PTR_ERR(f->fr_mr);
        dprintk("RPC:       %s: ib_alloc_mr status %i\n",
                __func__, rc);
        return rc;

out_list_err:
        rc = -ENOMEM;
        dprintk("RPC:       %s: sg allocation failure\n",
                __func__);
        ib_dereg_mr(f->fr_mr);
        return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
        int rc;

        /* Ensure MW is not on any rl_registered list */
        if (!list_empty(&r->mw_list))
                list_del(&r->mw_list);

        rc = ib_dereg_mr(r->frmr.fr_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
                       r, rc);
        kfree(r->mw_sg);
        kfree(r);
}

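/* Replace a broken MR with a fresh one: deregistering the old ib_mr
 * and allocating a replacement is how a new rkey is obtained. The
 * fresh MR starts in the INVALID state, ready for frwr_op_map.
 */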
static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
        struct rpcrdma_frmr *f = &r->frmr;
        int rc;

        rc = ib_dereg_mr(f->fr_mr);
        if (rc) {
                pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
                        rc, r);
                return rc;
        }

        f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
                               ia->ri_max_frmr_depth);
        if (IS_ERR(f->fr_mr)) {
                pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
                        PTR_ERR(f->fr_mr), r);
                return PTR_ERR(f->fr_mr);
        }

        dprintk("RPC:       %s: recovered FRMR %p\n", __func__, f);
        f->fr_state = FRMR_IS_INVALID;
        return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
        enum rpcrdma_frmr_state state = mw->frmr.fr_state;
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        int rc;

        rc = __frwr_reset_mr(ia, mw);
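        /* If this MR's LOCAL_INV WR was flushed (FLUSHED_LI), its
         * scatterlist was already DMA-unmapped on the unmap path;
         * unmapping it a second time risks a DMA API warning or an
         * IOMMU fault.
         */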
        if (state != FRMR_FLUSHED_LI)
                ib_dma_unmap_sg(ia->ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
        if (rc)
                goto out_release;

        rpcrdma_put_mw(r_xprt, mw);
        r_xprt->rx_stats.mrs_recovered++;
        return;

out_release:
        pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
        r_xprt->rx_stats.mrs_orphaned++;

        spin_lock(&r_xprt->rx_buf.rb_mwlock);
        list_del(&mw->mw_all);
        spin_unlock(&r_xprt->rx_buf.rb_mwlock);

        frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;
        int depth, delta;

        ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
        if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              attrs->max_fast_reg_page_list_len);
        dprintk("RPC:       %s: device's max FR page list len = %u\n",
                __func__, ia->ri_max_frmr_depth);

        /* Add room for frmr register and invalidate WRs.
         * 1. FRMR reg WR for head
         * 2. FRMR invalidate WR for head
         * 3. N FRMR reg WRs for pagelist
         * 4. N FRMR invalidate WRs for pagelist
         * 5. FRMR reg WR for tail
         * 6. FRMR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRMR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
                do {
                        depth += 2; /* FRMR reg + invalidate */
                        delta -= ia->ri_max_frmr_depth;
                } while (delta > 0);
        }
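
        /* Worked example, with hypothetical numbers: if the device
         * caps ri_max_frmr_depth at 16 and RPCRDMA_MAX_DATA_SEGS is
         * 64, delta starts at 48 and the loop above runs three times,
         * leaving depth = 7 + 3 * 2 = 13 WRs per RPC.
         */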

        ep->rep_attr.cap.max_send_wr *= depth;
        if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
                cdata->max_requests = attrs->max_qp_wr / depth;
                if (!cdata->max_requests)
                        return -EINVAL;
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *
                                               depth;
        }

        ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
                                ia->ri_max_frmr_depth);
        return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

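/* Report a send completion error. Flush errors are expected while a
 * QP is being torn down, so they are not logged.
 */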
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
                       wr, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;
        struct ib_cqe *cqe;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                cqe = wc->wr_cqe;
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                frmr->fr_state = FRMR_FLUSHED_FR;
                __frwr_sendcompletion_flush(wc, "fastreg");
        }
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;
        struct ib_cqe *cqe;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                cqe = wc->wr_cqe;
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                frmr->fr_state = FRMR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;
        struct ib_cqe *cqe;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        cqe = wc->wr_cqe;
        frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
        if (wc->status != IB_WC_SUCCESS) {
                frmr->fr_state = FRMR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
        complete(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing, struct rpcrdma_mw **out)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
        struct rpcrdma_mw *mw;
        struct rpcrdma_frmr *frmr;
        struct ib_mr *mr;
        struct ib_reg_wr *reg_wr;
        struct ib_send_wr *bad_wr;
        int rc, i, n;
        u8 key;

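        /* Acquire an MR that is known to be usable: an MR left VALID
         * or FLUSHED by a transport disconnect is handed off to the
         * recovery workqueue, and another is drawn from rb_mws.
         */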
        mw = NULL;
        do {
                if (mw)
                        rpcrdma_defer_mr_recovery(mw);
                mw = rpcrdma_get_mw(r_xprt);
                if (!mw)
                        return ERR_PTR(-ENOBUFS);
        } while (mw->frmr.fr_state != FRMR_IS_INVALID);
        frmr = &mw->frmr;
        frmr->fr_state = FRMR_IS_VALID;
        mr = frmr->fr_mr;
        reg_wr = &frmr->fr_regwr;

        if (nsegs > ia->ri_max_frmr_depth)
                nsegs = ia->ri_max_frmr_depth;
        for (i = 0; i < nsegs;) {
                if (seg->mr_page)
                        sg_set_page(&mw->mw_sg[i],
                                    seg->mr_page,
                                    seg->mr_len,
                                    offset_in_page(seg->mr_offset));
                else
                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
                                   seg->mr_len);

                ++seg;
                ++i;
                if (holes_ok)
                        continue;
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mw->mw_dir = rpcrdma_data_dir(writing);

        mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
        if (!mw->mw_nents)
                goto out_dmamap_err;

        n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
        if (unlikely(n != mw->mw_nents))
                goto out_mapmr_err;

        dprintk("RPC:       %s: Using frmr %p to map %u segments (%llu bytes)\n",
                __func__, frmr, mw->mw_nents, mr->length);

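        /* Bump the key portion of the rkey for each registration of
         * this MR, so that an rkey from a previous registration
         * cannot be used to reach the newly registered region.
         */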
        key = (u8)(mr->rkey & 0x000000FF);
        ib_update_fast_reg_key(mr, ++key);

        reg_wr->wr.next = NULL;
        reg_wr->wr.opcode = IB_WR_REG_MR;
        frmr->fr_cqe.done = frwr_wc_fastreg;
        reg_wr->wr.wr_cqe = &frmr->fr_cqe;
        reg_wr->wr.num_sge = 0;
        reg_wr->wr.send_flags = 0;
        reg_wr->mr = mr;
        reg_wr->key = mr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
        rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
        if (rc)
                goto out_senderr;

        mw->mw_handle = mr->rkey;
        mw->mw_length = mr->length;
        mw->mw_offset = mr->iova;

        *out = mw;
        return seg;

out_dmamap_err:
        pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
               mw->mw_sg, i);
        frmr->fr_state = FRMR_IS_INVALID;
        rpcrdma_put_mw(r_xprt, mw);
        return ERR_PTR(-EIO);

out_mapmr_err:
        pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
               frmr->fr_mr, n, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);
        return ERR_PTR(-EIO);

out_senderr:
        pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
        rpcrdma_defer_mr_recovery(mw);
        return ERR_PTR(-ENOTCONN);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
        struct ib_send_wr *first, **prev, *last, *bad_wr;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_frmr *f;
        struct rpcrdma_mw *mw;
        int count, rc;

        /* ORDER: Invalidate all of the MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        f = NULL;
        count = 0;
        prev = &first;
        list_for_each_entry(mw, mws, mw_list) {
                mw->frmr.fr_state = FRMR_IS_INVALID;

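                /* An MR flagged RPCRDMA_MW_F_RI was invalidated
                 * remotely by the server (Send With Invalidate),
                 * so no LOCAL_INV WR is needed for it.
                 */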
                if (mw->mw_flags & RPCRDMA_MW_F_RI)
                        continue;

                f = &mw->frmr;
                dprintk("RPC:       %s: invalidating frmr %p\n",
                        __func__, f);

                f->fr_cqe.done = frwr_wc_localinv;
                last = &f->fr_invwr;
                memset(last, 0, sizeof(*last));
                last->wr_cqe = &f->fr_cqe;
                last->opcode = IB_WR_LOCAL_INV;
                last->ex.invalidate_rkey = mw->mw_handle;
                count++;

                *prev = last;
                prev = &last->next;
        }
        if (!f)
                goto unmap;

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        last->send_flags = IB_SEND_SIGNALED;
        f->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&f->fr_linv_done);

        /* Initialize CQ count, since there is always a signaled
         * WR being posted here. The new cqcount depends on how
         * many SQEs are about to be consumed.
         */
        rpcrdma_init_cqcount(&r_xprt->rx_ep, count);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
         */
        r_xprt->rx_stats.local_inv_needed++;
        bad_wr = NULL;
        rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
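        /* If ib_post_send() could not post even the first WR
         * (bad_wr == first), nothing was queued and no completion
         * will ever fire; waiting here would hang forever.
         */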
        if (bad_wr != first)
                wait_for_completion(&f->fr_linv_done);
        if (rc)
                goto reset_mrs;

        /* ORDER: Now DMA unmap all of the MRs, and return
         * them to the free MW list.
         */
unmap:
        while (!list_empty(mws)) {
                mw = rpcrdma_pop_mw(mws);
                dprintk("RPC:       %s: DMA unmapping frmr %p\n",
                        __func__, &mw->frmr);
                ib_dma_unmap_sg(ia->ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
                rpcrdma_put_mw(r_xprt, mw);
        }
        return;

reset_mrs:
        pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);

        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted.
         */
        rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
        while (bad_wr) {
                f = container_of(bad_wr, struct rpcrdma_frmr,
                                 fr_invwr);
                mw = container_of(f, struct rpcrdma_mw, frmr);

                __frwr_reset_mr(ia, mw);

                bad_wr = bad_wr->next;
        }
        goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                   bool sync)
{
        struct rpcrdma_mw *mw;

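        /* If @sync is set, broken MRs are recovered before this
         * function returns; otherwise they are handed to the MR
         * recovery workqueue and reset in the background.
         */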
        while (!list_empty(&req->rl_registered)) {
                mw = rpcrdma_pop_mw(&req->rl_registered);
                if (sync)
                        frwr_op_recover_mr(mw);
                else
                        rpcrdma_defer_mr_recovery(mw);
        }
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map                         = frwr_op_map,
        .ro_unmap_sync                  = frwr_op_unmap_sync,
        .ro_unmap_safe                  = frwr_op_unmap_safe,
        .ro_recover_mr                  = frwr_op_recover_mr,
        .ro_open                        = frwr_op_open,
        .ro_maxpages                    = frwr_op_maxpages,
        .ro_init_mr                     = frwr_op_init_mr,
        .ro_release_mr                  = frwr_op_release_mr,
        .ro_displayname                 = "frwr",
        .ro_send_w_inv_ok               = RPCRDMA_CMP_F_SND_W_INV_OK,
};