// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */
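
/* Illustrative call sequence (a simplified sketch; the surrounding
 * rpcrdma code supplies @r_xprt, @ia, @req, @mr, and @seg, and
 * handles errors and retransmits):
 *
 *	seg = frwr_map(r_xprt, seg, nsegs, writing, xid, mr);
 *	rc = frwr_send(ia, req);
 *	... RPC executes, reply arrives ...
 *	frwr_unmap_async(r_xprt, req);
 */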

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all flushed MRs are
 * destroyed. New MRs are created on demand.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check if device supports FRWR
 * @device: interface adapter to check
 *
 * Returns true if device supports FRWR, otherwise false
 */
bool frwr_is_supported(struct ib_device *device)
{
	struct ib_device_attr *attrs = &device->attrs;

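	/* FRWR requires both the fast registration verbs and a
	 * non-zero fast registration page list depth.
	 */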
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		device->name);
	return false;
}

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

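	/* Unlink the MR from the transport-wide list of MRs so that
	 * no other code can find it while it is being destroyed.
	 */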
	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_release_mr(mr);
}

/**
 * frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	/* NB: ib_alloc_mr and device drivers typically allocate
	 *     memory with GFP_KERNEL.
	 */
	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

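	/* GFP_NOFS keeps memory reclaim triggered by this allocation
	 * from re-entering the filesystem (and thus NFS) and
	 * deadlocking against in-flight NFS activity.
	 */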
	sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	ep->rep_max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
{
	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC:       %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
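	/* A worked example (illustrative values): if RPCRDMA_MAX_DATA_SEGS
	 * is 64 and the device's max FRWR depth is 24, delta starts at 40
	 * and the loop above runs twice, leaving depth = 7 + 4 = 11.
	 */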

	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->rep_max_requests > max_qp_wr)
		ep->rep_max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		ep->rep_max_requests = max_qp_wr / depth;
		if (!ep->rep_max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}

/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

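	/* Two of ri_max_segs are reserved for head and tail buffers,
	 * so they do not contribute to the payload size.
	 */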
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_reg_wr *reg_wr;
	struct ib_mr *ibmr;
	int i, n;
	u8 key;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents =
		ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

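	/* Stamp the RPC's XID into the upper 32 bits of the MR's IOVA.
	 * The low bits still address the registered memory; the stamp
	 * can help match an MR with its RPC in traces and wire captures.
	 */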
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
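	/* Bump the key portion of the rkey so that each registration
	 * of this MR hands out a fresh rkey.
	 */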
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
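	/* Link each FastReg WR in front of the WRs already chained,
	 * so that one post operation registers every MR and then
	 * transmits the Send WR last.
	 */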
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

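	/* The server has already invalidated the matching MR via
	 * Send With Invalidate, so no LOCAL_INV WR is needed for it.
	 */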
	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
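	/* A flushed MR may still be registered or only partially
	 * invalidated, so it cannot simply go back on the free list;
	 * recycling destroys it and allocates a replacement on demand.
	 */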
	if (wc->status != IB_WC_SUCCESS)
		rpcrdma_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	complete(&frwr->fr_linv_done);
	__frwr_release_mr(wc, mr);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	trace_xprtrdma_post_send(req, rc);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	rpcrdma_complete_rqst(frwr->fr_req->rl_reply);
	__frwr_release_mr(wc, mr);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		frwr->fr_req = req;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	trace_xprtrdma_post_send(req, rc);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		rpcrdma_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}