// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */
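
/* For example (an illustrative sketch): an RPC Call that carries two
 * registered chunks is typically posted by frwr_send() below as one
 * unsignaled chain, registering both MRs and sending the Call with a
 * single doorbell:
 *
 *	FAST_REG(MR A) -> FAST_REG(MR B) -> RDMA Send
 */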

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	if (mr->mr_device) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
				mr->mr_dir);
		mr->mr_device = NULL;
	}
}

static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	frwr_mr_unmap(r_xprt, mr);

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

static void frwr_mr_put(struct rpcrdma_mr *mr)
{
	frwr_mr_unmap(mr->mr_xprt, mr);

	/* The MR is returned to the req's MR free list instead
	 * of to the xprt's MR free list. No spinlock is needed.
	 */
	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->frwr.fr_mr = frmr;
	mr->mr_device = NULL;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;
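
	/* For example (hypothetical numbers): a device that advertises
	 * a max_fast_reg_page_list_len of 512 but a max_sge_rd of only
	 * 30 ends up with an re_max_fr_depth of 30.
	 */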

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}
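
	/* Worked example with hypothetical values: if re_max_fr_depth
	 * is 64 and RPCRDMA_MAX_DATA_SEGS is 256, delta starts at 192
	 * and the loop runs three times, leaving depth = 7 + 6 = 13.
	 */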

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
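
	/* For example (hypothetical values): 128 credits at a depth of
	 * 13 ask for 1664 Send Queue slots before the backchannel and
	 * drain entries are added; a device with a smaller max_qp_wr
	 * forces re_max_requests down until the math fits.
	 */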

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
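
	/* For example (hypothetical values): an re_max_fr_depth of 30
	 * with RPCRDMA_MAX_DATA_SEGS of 256 yields DIV_ROUND_UP(256, 30)
	 * = 9 segments, or 11 once head and tail are counted.
	 */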

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		sg_set_page(&mr->mr_sg[i], seg->mr_page,
			    seg->mr_len, seg->mr_offset);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
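		/* A plain MEM_REG MR cannot describe gaps: stop
		 * coalescing when the next segment does not start on
		 * a page boundary, or the segment just added does not
		 * end on one.
		 */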
		if ((i < nsegs && seg->mr_offset) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;
	mr->mr_device = ep->re_id->device;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

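	/* Plant the RPC's XID in the upper bits of the MR's IOVA, a
	 * debugging aid: registrations captured on the wire can then
	 * be matched to the RPC that produced them.
	 */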
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
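	/* Bump the key portion of the rkey so that each registration
	 * of this MR hands out a fresh rkey, and a stale handle from
	 * an earlier registration no longer matches.
	 */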
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid);
	/* The MR will get recycled when the associated req is retransmitted */

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

static void frwr_cid_init(struct rpcrdma_ep *ep,
			  struct rpcrdma_frwr *frwr)
{
	struct rpc_rdma_cid *cid = &frwr->fr_cid;

	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = frwr->fr_mr->res.id;
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr_cid_init(ep, frwr);
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	return ib_post_send(ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			frwr_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, &frwr->fr_cid);
	frwr_mr_done(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid);
	frwr_mr_done(wc, mr);
	complete(&frwr->fr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		frwr_cid_init(ep, frwr);
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

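	/* The resulting chain might look like this (illustrative):
	 *
	 *	LOCAL_INV(MR A) -> LOCAL_INV(MR B, wakes the waiter)
	 */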
	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv_err(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid);
	frwr_mr_done(wc, mr);

	/* Ensure @rep is generated before frwr_mr_done */
	smp_rmb();
	rpcrdma_complete_rqst(rep);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		frwr_cid_init(ep, frwr);
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv_err(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}