// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
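
/* A rough sketch (not authoritative) of the fr_state transitions
 * described above, as driven by frwr_op_map, frwr_op_unmap_sync,
 * the flush handlers, and __frwr_mr_reset:
 *
 *	FRWR_IS_INVALID  --frwr_op_map---------------->  FRWR_IS_VALID
 *	FRWR_IS_VALID    --frwr_op_unmap_sync--------->  FRWR_IS_INVALID
 *	FRWR_IS_VALID    --FastReg WR flushed--------->  FRWR_FLUSHED_FR
 *	FRWR_IS_INVALID  --pending LocalInv flushed--->  FRWR_FLUSHED_LI
 *	any broken state --__frwr_mr_reset------------>  FRWR_IS_INVALID
 */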

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not by one
 * whose ->op_unmap could fire while a transport reconnect is in
 * progress.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID: The MR was not in use before the QP entered ERROR state.
 *
 * VALID: The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR: The MR was being registered when the QP entered ERROR
 * state, and the pending WR was flushed.
 *
 * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
 * state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
72
Chuck Leverc8b920b2016-09-15 10:57:16 -040073#include <linux/sunrpc/rpc_rdma.h>
74
Chuck Levera0ce85f2015-03-30 14:34:21 -040075#include "xprt_rdma.h"
Chuck Leverb6e717cb2018-05-07 15:27:05 -040076#include <trace/events/rpcrdma.h>
Chuck Levera0ce85f2015-03-30 14:34:21 -040077
78#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
79# define RPCDBG_FACILITY RPCDBG_TRANS
80#endif
81
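/**
 * frwr_is_supported - Check if a device supports FRWR
 * @ia: interface adapter to check
 *
 * Returns true if the device advertises the memory management
 * extensions and a non-zero fast registration page list depth,
 * both of which FRWR requires.
 */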
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

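/* Allocate the ib_mr and scatterlist that back one rpcrdma_mr.
 * On failure no resources are leaked: the fresh ib_mr is released
 * if the scatterlist cannot be allocated.
 */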
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frwr->fr_mr))
		goto out_mr_err;

	mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_list_err;

	sg_init_table(mr->mr_sg, depth);
	init_completion(&frwr->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(frwr->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frwr->fr_mr);
	return rc;
}

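/* Release all resources backing one rpcrdma_mr: the ib_mr, the
 * scatterlist, and the rpcrdma_mr structure itself.
 */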
static void
frwr_op_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	/* Ensure MR is not on any rl_registered list */
	if (!list_empty(&mr->mr_list))
		list_del(&mr->mr_list);

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

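/* De-register the current ib_mr and replace it with a fresh one.
 * Replacing the MR is how a fresh rkey is generated after a flush
 * (see frwr_op_recover_mr below).
 */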
static int
__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	rc = ib_dereg_mr(frwr->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, mr);
		return rc;
	}

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
				  ia->ri_max_frwr_depth);
	if (IS_ERR(frwr->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(frwr->fr_mr), mr);
		return PTR_ERR(frwr->fr_mr);
	}

	dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
	frwr->fr_state = FRWR_IS_INVALID;
	return 0;
}

/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mr *mr)
{
	enum rpcrdma_frwr_state state = mr->frwr.fr_state;
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_mr_reset(ia, mr);
	if (state != FRWR_FLUSHED_LI) {
		trace_xprtrdma_dma_unmap(mr);
		ib_dma_unmap_sg(ia->ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
	}
	if (rc)
		goto out_release;

	rpcrdma_mr_put(mr);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_op_release_mr(mr);
}

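/* frwr_op_open - Prepare the transport's send queue for FRWR.
 *
 * Each RPC can carry FRWR registration and invalidation WRs in
 * addition to its Send WR, so max_send_wr is scaled by the per-RPC
 * WR budget computed below, then clamped to the device's max_qp_wr
 * by shrinking the credit limit (cdata->max_requests).
 */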
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frwr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

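	/* A worked example with hypothetical values: if the device
	 * caps ri_max_frwr_depth at 16 and RPCRDMA_MAX_DATA_SEGS is
	 * 64, delta starts at 48 and the loop above runs three
	 * times, so each RPC budgets depth = 7 + 3 * 2 = 13 WRs.
	 */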
	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_defer_recovery(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

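	/* The low-order byte of the rkey is under consumer control.
	 * Bumping it for each registration distinguishes this
	 * registration from earlier uses of the same ib_mr, so a
	 * stale rkey held by a peer no longer matches.
	 */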
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	frwr->fr_state = FRWR_IS_INVALID;
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frwr->fr_mr, n, mr->mr_nents);
	rpcrdma_mr_defer_recovery(mr);
	return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 */
static int
frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr, *bad_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
}

/* Handle a remotely invalidated MR on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del(&mr->mr_list);
			trace_xprtrdma_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
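	/* On failure, ib_post_send() sets @bad_wr to the first WR
	 * that was not posted. If nothing at all was posted
	 * (bad_wr == first), no completion can arrive, so skip
	 * the wait.
	 */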
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto reset_mrs;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);

		__frwr_mr_reset(ia, mr);

		bad_wr = bad_wr->next;
	}
	goto unmap;
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map = frwr_op_map,
	.ro_send = frwr_op_send,
	.ro_reminv = frwr_op_reminv,
	.ro_unmap_sync = frwr_op_unmap_sync,
	.ro_recover_mr = frwr_op_recover_mr,
	.ro_open = frwr_op_open,
	.ro_maxpages = frwr_op_maxpages,
	.ro_init_mr = frwr_op_init_mr,
	.ro_release_mr = frwr_op_release_mr,
	.ro_displayname = "frwr",
	.ro_send_w_inv_ok = RPCRDMA_CMP_F_SND_W_INV_OK,
};