// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not by one on
 * which ->op_unmap could fire while a transport reconnect is in
 * progress.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recycled:
 * each MR is DMA unmapped, deregistered with ib_dereg_mr, and released.
 * Because releasing an MR's device resources may sleep, recycling is
 * deferred to a workqueue. frwr_op_map simply takes another MR for the
 * current RPC while the broken MR is recycled; a replacement MR is
 * allocated on demand.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

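/* frwr_is_supported - Check whether a device can support FRWR
 * @ia: interface adapter for the underlying device
 *
 * Returns true only if the device advertises the memory management
 * extensions and a non-zero fast register page list depth.
 */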
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

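/* Release all resources attached to @mr: deregister the ib_mr with
 * the device, then free the scatterlist and the rpcrdma_mr itself.
 */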
static void
frwr_op_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);
	frwr_op_release_mr(mr);
}

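/* Prepare a freshly-allocated rpcrdma_mr for use: allocate an ib_mr
 * that can register up to ri_max_frwr_depth pages, allocate and
 * initialize a scatterlist of the same depth, and set up the work
 * and completion structures used during invalidation.
 */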
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frwr->fr_mr))
		goto out_mr_err;

	mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_list_err;

	frwr->fr_state = FRWR_IS_INVALID;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	sg_init_table(mr->mr_sg, depth);
	init_completion(&frwr->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(frwr->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frwr->fr_mr);
	return rc;
}

/* On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 */
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frwr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
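
	/* Example with hypothetical values: a device FRWR depth of 16
	 * against an RPCRDMA_MAX_DATA_SEGS of 64 runs the loop above
	 * three times, adding three more reg/invalidate pairs (six
	 * WRs) to @depth.
	 */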

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		cdata->max_requests = max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	ia->ri_max_segs += 2;	/* segments for head and tail buffers */
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}

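/* Report a completion error on the console unless it is an expected
 * flush (IB_WC_WR_FLUSH_ERR), which is silent.
 */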
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

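	/* Recycle any MR that has not returned to the INVALID state
	 * (it was left VALID or FLUSHED by a transport disconnect),
	 * and take another one instead.
	 */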
	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_recycle(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;
	trace_xprtrdma_mr_map(mr);

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

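	/* Advance the 8-bit key portion of the MR's rkey so that each
	 * registration presents a fresh rkey to the server.
	 */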
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	frwr->fr_state = FRWR_IS_INVALID;
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frwr->fr_mr, n, mr->mr_nents);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 */
static int
frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

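		/* Prepend this FastReg WR to the chain; the Send WR
		 * remains at the tail and is executed last.
		 */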
		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/* Handle a remotely invalidated mr on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_mr_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
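	/* If ib_post_send() accepted at least the first WR, wait for
	 * the final LOCAL_INV's completion before unmapping the MRs.
	 */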
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Unmap and release the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del(&mr->mr_list);
		frwr_op_release_mr(mr);
	}
}

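/* Entry points for FRWR-mode memory registration, invoked through
 * the transport's ro_* method table.
 */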
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_send			= frwr_op_send,
	.ro_reminv			= frwr_op_reminv,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init_mr			= frwr_op_init_mr,
	.ro_release_mr			= frwr_op_release_mr,
	.ro_displayname			= "frwr",
	.ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
};