// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check if device supports FRWR
 * @device: interface adapter to check
 *
 * Returns true if device supports FRWR, otherwise false
 */
bool frwr_is_supported(struct ib_device *device)
{
	struct ib_device_attr *attrs = &device->attrs;

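	/* FRWR depends on the device's memory management extensions
	 * and a non-zero fast registration page list depth.
	 */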
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		device->name);
	return false;
}

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

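/* frwr_mr_recycle - Release an MR that is no longer usable
 *
 * The MR is DMA-unmapped if necessary, removed from the transport's
 * list of MRs, counted as recycled, and destroyed. A replacement MR
 * is created on demand later.
 */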
static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	ep->rep_max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
{
	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC: %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2;	/* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->rep_max_requests > max_qp_wr)
		ep->rep_max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		ep->rep_max_requests = max_qp_wr / depth;
		if (!ep->rep_max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}

/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_reg_wr *reg_wr;
	struct ib_mr *ibmr;
	int i, n;
	u8 key;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
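	/* Gather the segments into the MR's scatterlist. Unless the
	 * device supports SG_GAPS registration, coalescing stops at
	 * the first segment that does not begin or end on a page
	 * boundary.
	 */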
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents =
		ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

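	/* Plant the RPC's XID in the upper 32 bits of the MR's iova
	 * so the registration can be matched with the RPC that owns
	 * it, and bump the key portion of the rkey so this
	 * registration uses a fresh rkey value.
	 */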
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
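	/* Each FastReg WR is linked ahead of the Send WR so that the
	 * single post below registers this request's MRs and then
	 * transmits the Send.
	 */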
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

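/* A flushed or otherwise unsuccessful LOCAL_INV leaves the MR in an
 * unknown state, so it cannot be returned to the free list; recycle
 * (destroy) it instead. On success the MR is safe to reuse.
 */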
static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}