// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
| 28 | * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 29 | * rb_mrs immediately so that no work (like managing a linked list |
Chuck Lever | c14d86e | 2015-05-26 11:52:35 -0400 | [diff] [blame] | 30 | * under a spinlock) is needed in the completion upcall. |
| 31 | * |
| 32 | * But this means that frwr_op_map() can occasionally encounter an MR |
| 33 | * that is INVALID but the LOCAL_INV WR has not completed. Work Queue |
| 34 | * ordering prevents a subsequent FAST_REG WR from executing against |
| 35 | * that MR while it is still being invalidated. |
| 36 | */ |
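
/* Editorial sketch (not upstream code): the Work Request traffic for
 * one RPC with a single registered segment looks roughly like:
 *
 *	FAST_REG(MR) -> SEND(RPC Call)	(one chain, usually unsignaled)
 *	  ... server performs RDMA READ/WRITE against the MR ...
 *	LOCAL_INV(MR)			(signaled only on the last WR
 *					 of the invalidation chain)
 *
 * Send Queue ordering guarantees the FAST_REG executes before the
 * SEND, and that a later FAST_REG cannot overtake a pending
 * LOCAL_INV for the same MR.
 */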

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are
 * recycled: each is DMA unmapped, deregistered with ib_dereg_mr, and
 * released. Because deregistration can sleep, recycling is deferred
 * to a workqueue. frwr_op_map simply takes a different MR for the
 * current RPC, and a fresh replacement MR is allocated on demand
 * later.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
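
/* Editorial sketch (not upstream code): MR state transitions.
 *
 *	INVALID --frwr_op_map--> VALID --LOCAL_INV completes--> INVALID
 *
 * A flush while registering leaves the MR FLUSHED_FR; a flush while
 * invalidating leaves it FLUSHED_LI. frwr_op_map hands any MR it
 * finds in a non-INVALID state to rpcrdma_mr_recycle(), which
 * releases it; a replacement MR is allocated on demand.
 */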

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}
| 99 | |
Chuck Lever | 61da886 | 2018-10-01 14:25:25 -0400 | [diff] [blame] | 100 | static void |
| 101 | frwr_op_release_mr(struct rpcrdma_mr *mr) |
| 102 | { |
| 103 | int rc; |
| 104 | |
| 105 | rc = ib_dereg_mr(mr->frwr.fr_mr); |
| 106 | if (rc) |
| 107 | pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n", |
| 108 | mr, rc); |
| 109 | kfree(mr->mr_sg); |
| 110 | kfree(mr); |
| 111 | } |
| 112 | |
| 113 | /* MRs are dynamically allocated, so simply clean up and release the MR. |
| 114 | * A replacement MR will subsequently be allocated on demand. |
| 115 | */ |
| 116 | static void |
| 117 | frwr_mr_recycle_worker(struct work_struct *work) |
| 118 | { |
| 119 | struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle); |
Chuck Lever | 61da886 | 2018-10-01 14:25:25 -0400 | [diff] [blame] | 120 | struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
| 121 | |
| 122 | trace_xprtrdma_mr_recycle(mr); |
| 123 | |
Chuck Lever | e2f34e2 | 2018-12-19 10:58:13 -0500 | [diff] [blame^] | 124 | if (mr->mr_dir != DMA_NONE) { |
Chuck Lever | d379eaa | 2018-10-01 14:25:30 -0400 | [diff] [blame] | 125 | trace_xprtrdma_mr_unmap(mr); |
Chuck Lever | 61da886 | 2018-10-01 14:25:25 -0400 | [diff] [blame] | 126 | ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, |
| 127 | mr->mr_sg, mr->mr_nents, mr->mr_dir); |
Chuck Lever | e2f34e2 | 2018-12-19 10:58:13 -0500 | [diff] [blame^] | 128 | mr->mr_dir = DMA_NONE; |
Chuck Lever | 61da886 | 2018-10-01 14:25:25 -0400 | [diff] [blame] | 129 | } |
| 130 | |
| 131 | spin_lock(&r_xprt->rx_buf.rb_mrlock); |
| 132 | list_del(&mr->mr_all); |
| 133 | r_xprt->rx_stats.mrs_recycled++; |
| 134 | spin_unlock(&r_xprt->rx_buf.rb_mrlock); |
| 135 | frwr_op_release_mr(mr); |
| 136 | } |
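
/* Editorial note: in this version of the code, recycling is kicked
 * off via rpcrdma_mr_recycle(), which schedules the mr_recycle work
 * item so that the sleeping cleanup above runs in process context,
 * roughly:
 *
 *	if (mr->frwr.fr_state != FRWR_IS_INVALID)
 *		rpcrdma_mr_recycle(mr); // queues frwr_mr_recycle_worker
 */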

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frwr->fr_mr))
		goto out_mr_err;

	mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_list_err;

	frwr->fr_state = FRWR_IS_INVALID;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	sg_init_table(mr->mr_sg, depth);
	init_completion(&frwr->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(frwr->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frwr->fr_mr);
	return rc;
}

/* On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 */
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frwr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
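
	/* Worked example (editorial, with assumed values): if the device
	 * caps FRWR depth at 16 and RPCRDMA_MAX_DATA_SEGS is 64, then
	 * delta starts at 48 and the loop runs three times (48 -> 32 ->
	 * 16 -> 0), adding 2 WRs per pass: depth = 7 + 6 = 13.
	 */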

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		cdata->max_requests = max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	ia->ri_max_segs += 2;	/* segments for head and tail buffers */
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}
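
/* Editorial example with assumed values: with RPCRDMA_MAX_DATA_SEGS
 * of 64, a hypothetical RPCRDMA_MAX_HDR_SEGS of 8, and a device FRWR
 * depth of 16, this returns min(64, 8 * 16) = 64 pages per payload.
 */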

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_recycle(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;
	trace_xprtrdma_mr_map(mr);

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	frwr->fr_state = FRWR_IS_INVALID;
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frwr->fr_mr, n, mr->mr_nents);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR. (See the ordering sketch after
 * this function.)
 */
static int
frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}
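
/* Editorial sketch (not upstream code): frwr_op_send prepends each
 * FastReg WR as it walks rl_registered, so with two registered MRs
 * the chain handed to ib_post_send is:
 *
 *	REG_MR(mr2) -> REG_MR(mr1) -> SEND(RPC Call)
 *
 * Send Queue ordering guarantees both registrations complete before
 * the Call is transmitted.
 */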

/* Handle a remotely invalidated mr on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
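
	/* Editorial sketch (not upstream code): for three MRs the loop
	 * below builds
	 *
	 *	LOCAL_INV(mr1) -> LOCAL_INV(mr2) -> LOCAL_INV(mr3)
	 *
	 * with only the final WR marked IB_SEND_SIGNALED; its
	 * completion implies all earlier WRs in the chain are done.
	 */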
	frwr = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_mr_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Unmap and release the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del(&mr->mr_list);
		frwr_op_release_mr(mr);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map = frwr_op_map,
	.ro_send = frwr_op_send,
	.ro_reminv = frwr_op_reminv,
	.ro_unmap_sync = frwr_op_unmap_sync,
	.ro_open = frwr_op_open,
	.ro_maxpages = frwr_op_maxpages,
	.ro_init_mr = frwr_op_init_mr,
	.ro_release_mr = frwr_op_release_mr,
	.ro_displayname = "frwr",
	.ro_send_w_inv_ok = RPCRDMA_CMP_F_SND_W_INV_OK,
};