// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

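/* frwr_cid_init - Build a completion ID for an MR
 *
 * Pair the Send CQ's resource ID with the MR's own resource ID so
 * that the completion tracepoints below (trace_xprtrdma_wc_*) can
 * report which CQ and which MR each completion belongs to.
 */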
static void frwr_cid_init(struct rpcrdma_ep *ep,
			  struct rpcrdma_mr *mr)
{
	struct rpc_rdma_cid *cid = &mr->mr_cid;

	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = mr->mr_ibmr->res.id;
}

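/* DMA unmap an MR's scatterlist, if it is currently mapped. */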
static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	if (mr->mr_device) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
				mr->mr_dir);
		mr->mr_device = NULL;
	}
}

/**
 * frwr_mr_release - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_mr_release(struct rpcrdma_mr *mr)
{
	int rc;

	frwr_mr_unmap(mr->mr_xprt, mr);

	rc = ib_dereg_mr(mr->mr_ibmr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_put(struct rpcrdma_mr *mr)
{
	frwr_mr_unmap(mr->mr_xprt, mr);

	/* The MR is returned to the req's MR free list instead
	 * of to the xprt's MR free list. No spinlock is needed.
	 */
	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->mr_ibmr = frmr;
	mr->mr_device = NULL;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->mr_linv_done);
	frwr_cid_init(ep, mr);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

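	/* Each RPC slot consumes up to "depth" Send Queue entries.
	 * Trim re_max_requests so that the resulting Send Queue fits
	 * within the device's max_qp_wr, reserving room for backchannel
	 * Work Requests and for ib_drain_sq/ib_drain_rq.
	 */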
	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
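	/* Gather at most re_max_fr_depth segments. Unless the device
	 * supports SG_GAPS registration, stop at the first gap: an
	 * interior segment must end on a page boundary, and each
	 * following segment must start at offset zero.
	 */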
	for (i = 0; i < nsegs;) {
		sg_set_page(&mr->mr_sg[i], seg->mr_page,
			    seg->mr_len, seg->mr_offset);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && seg->mr_offset) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;
	mr->mr_device = ep->re_id->device;

	ibmr = mr->mr_ibmr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

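	/* Place the RPC's XID in the upper 32 bits of the IOVA so
	 * that, for example, chunks captured on the wire can be
	 * matched to the RPC that registered them.
	 */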
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
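	/* Bump the key portion of the rkey so that each registration
	 * of this MR presents a fresh rkey to the remote.
	 */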
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->mr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 * Each flushed MR gets destroyed after the QP has drained.
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	unsigned int num_wrs;
	int ret;

	num_wrs = 1;
	post_wr = send_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		trace_xprtrdma_mr_fastreg(mr);

		mr->mr_cqe.done = frwr_wc_fastreg;
		mr->mr_regwr.wr.next = post_wr;
		mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
		mr->mr_regwr.wr.num_sge = 0;
		mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
		mr->mr_regwr.wr.send_flags = 0;
		post_wr = &mr->mr_regwr.wr;
		++num_wrs;
	}

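	/* Most Send WRs are posted unsignaled. Request a completion
	 * when another context still holds a reference on the req, or
	 * when the unsignaled budget (re_send_count) would otherwise
	 * be exhausted.
	 */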
	if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
					  num_wrs - ep->re_send_count);
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		ep->re_send_count -= num_wrs;
	}

	trace_xprtrdma_post_send(req);
	ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
	if (ret)
		trace_xprtrdma_post_send_err(r_xprt, req, ret);
	return ret;
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

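	/* The server can invalidate at most one MR per reply via
	 * Remote Invalidation. If one of the MRs on @mrs matches the
	 * invalidated rkey, release it without posting a LOCAL_INV.
	 */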
	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			frwr_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

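/* On success the MR can be recycled immediately. A flushed MR is
 * left alone here and is destroyed after the QP has drained.
 */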
static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);
	complete(&mr->mr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	mr = container_of(last, struct rpcrdma_mr, mr_invwr);

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->wr_cqe->done = frwr_wc_localinv_wake;
	reinit_completion(&mr->mr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&mr->mr_linv_done);
	if (!rc)
		return;

	/* On error, the MRs get destroyed once the QP has drained. */
	trace_xprtrdma_post_linv_err(req, rc);

	/* Force a connection loss to ensure complete recovery.
	 */
	rpcrdma_force_disconnect(ep);
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
	struct rpcrdma_rep *rep;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);

	/* Ensure that @rep is generated before the MR is released */
	rep = mr->mr_req->rl_reply;
	smp_rmb();

	if (wc->status != IB_WC_SUCCESS) {
		if (rep)
			rpcrdma_unpin_rqst(rep);
		rpcrdma_flush_disconnect(cq->cq_context, wc);
		return;
	}
	frwr_mr_put(mr);
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	last->wr_cqe->done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ep->re_id->qp, first, NULL);
	if (!rc)
		return;

	/* On error, the MRs get destroyed once the QP has drained. */
	trace_xprtrdma_post_linv_err(req, rc);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake does
	 * not happen. Unpin the rqst in preparation for its
	 * retransmission.
	 */
	rpcrdma_unpin_rqst(req->rl_reply);

	/* Force a connection loss to ensure complete recovery.
	 */
	rpcrdma_force_disconnect(ep);
}

/**
 * frwr_wp_create - Create an MR for padding Write chunks
 * @r_xprt: transport resources to use
 *
 * Return 0 on success, negative errno on failure.
 */
int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr_seg seg;
	struct rpcrdma_mr *mr;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return -EAGAIN;
	mr->mr_req = NULL;
	ep->re_write_pad_mr = mr;

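	/* Register one XDR_UNIT of the transport's pad buffer so that
	 * Write chunks of unaligned length can be padded out to an
	 * XDR boundary.
	 */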
	seg.mr_len = XDR_UNIT;
	seg.mr_page = virt_to_page(ep->re_write_pad);
	seg.mr_offset = offset_in_page(ep->re_write_pad);
	if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
		return -EIO;
	trace_xprtrdma_mr_fastreg(mr);

	mr->mr_cqe.done = frwr_wc_fastreg;
	mr->mr_regwr.wr.next = NULL;
	mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
	mr->mr_regwr.wr.num_sge = 0;
	mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
	mr->mr_regwr.wr.send_flags = 0;

	return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
}