\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 1 | /* |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 2 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. |
| 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the BSD-type |
| 8 | * license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * |
| 14 | * Redistributions of source code must retain the above copyright |
| 15 | * notice, this list of conditions and the following disclaimer. |
| 16 | * |
| 17 | * Redistributions in binary form must reproduce the above |
| 18 | * copyright notice, this list of conditions and the following |
| 19 | * disclaimer in the documentation and/or other materials provided |
| 20 | * with the distribution. |
| 21 | * |
| 22 | * Neither the name of the Network Appliance, Inc. nor the names of |
| 23 | * its contributors may be used to endorse or promote products |
| 24 | * derived from this software without specific prior written |
| 25 | * permission. |
| 26 | * |
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 31 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 32 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 33 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 34 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 35 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 37 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 38 | */ |
| 39 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 40 | /* |
| 41 | * verbs.c |
| 42 | * |
| 43 | * Encapsulates the major functions managing: |
| 44 | * o adapters |
| 45 | * o endpoints |
| 46 | * o connections |
| 47 | * o buffer memory |
| 48 | */ |
| 49 | |
Alexey Dobriyan | a6b7a40 | 2011-06-06 10:43:46 +0000 | [diff] [blame] | 50 | #include <linux/interrupt.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 51 | #include <linux/slab.h> |
Chuck Lever | eba8ff6 | 2015-01-21 11:03:02 -0500 | [diff] [blame] | 52 | #include <linux/prefetch.h> |
Chuck Lever | 0dd39ca | 2015-03-30 14:33:43 -0400 | [diff] [blame] | 53 | #include <linux/sunrpc/addr.h> |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 54 | #include <linux/sunrpc/svc_rdma.h> |
Chuck Lever | 65866f8 | 2014-05-28 10:33:59 -0400 | [diff] [blame] | 55 | #include <asm/bitops.h> |
Devesh Sharma | d0f36c4 | 2015-08-03 13:05:04 -0400 | [diff] [blame] | 56 | #include <linux/module.h> /* try_module_get()/module_put() */ |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 57 | |
\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 58 | #include "xprt_rdma.h" |
| 59 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 60 | /* |
| 61 | * Globals/Macros |
| 62 | */ |
| 63 | |
Jeff Layton | f895b25 | 2014-11-17 16:58:04 -0500 | [diff] [blame] | 64 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 65 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 66 | #endif |
| 67 | |
| 68 | /* |
| 69 | * internal functions |
| 70 | */ |
| 71 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 72 | static struct workqueue_struct *rpcrdma_receive_wq; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 73 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 74 | int |
| 75 | rpcrdma_alloc_wq(void) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 76 | { |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 77 | struct workqueue_struct *recv_wq; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 78 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 79 | recv_wq = alloc_workqueue("xprtrdma_receive", |
| 80 | WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI, |
| 81 | 0); |
| 82 | if (!recv_wq) |
| 83 | return -ENOMEM; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 84 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 85 | rpcrdma_receive_wq = recv_wq; |
| 86 | return 0; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 87 | } |
| 88 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 89 | void |
| 90 | rpcrdma_destroy_wq(void) |
Chuck Lever | f1a03b7 | 2014-11-08 20:14:37 -0500 | [diff] [blame] | 91 | { |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 92 | struct workqueue_struct *wq; |
Chuck Lever | f1a03b7 | 2014-11-08 20:14:37 -0500 | [diff] [blame] | 93 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 94 | if (rpcrdma_receive_wq) { |
| 95 | wq = rpcrdma_receive_wq; |
| 96 | rpcrdma_receive_wq = NULL; |
| 97 | destroy_workqueue(wq); |
| 98 | } |
Chuck Lever | f1a03b7 | 2014-11-08 20:14:37 -0500 | [diff] [blame] | 99 | } |
| 100 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 101 | static void |
| 102 | rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context) |
| 103 | { |
| 104 | struct rpcrdma_ep *ep = context; |
| 105 | |
Chuck Lever | 7ff11de | 2014-11-08 20:15:01 -0500 | [diff] [blame] | 106 | pr_err("RPC: %s: %s on device %s ep %p\n", |
Sagi Grimberg | 76357c7 | 2015-05-18 13:40:32 +0300 | [diff] [blame] | 107 | __func__, ib_event_msg(event->event), |
Chuck Lever | 7ff11de | 2014-11-08 20:15:01 -0500 | [diff] [blame] | 108 | event->device->name, context); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 109 | if (ep->rep_connected == 1) { |
| 110 | ep->rep_connected = -EIO; |
Chuck Lever | afadc46 | 2015-01-21 11:03:11 -0500 | [diff] [blame] | 111 | rpcrdma_conn_func(ep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 112 | wake_up_all(&ep->rep_connect_wait); |
| 113 | } |
| 114 | } |
| 115 | |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 116 | /** |
| 117 | * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC |
| 118 | * @cq: completion queue (ignored) |
| 119 | * @wc: completed WR |
| 120 | * |
Chuck Lever | 4220a07 | 2015-10-24 17:26:45 -0400 | [diff] [blame] | 121 | */ |
| 122 | static void |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 123 | rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 124 | { |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 125 | /* WARNING: Only wr_cqe and status are reliable at this point */ |
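	/* Flushed completions are expected while the QP is being torn
	 * down, so only other failures are worth reporting here.
	 */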
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

static void
rpcrdma_receive_worker(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);

	rpcrdma_reply_handler(rep);
}

/* Perform basic sanity checking to avoid using garbage
 * to update the credit grant value.
 */
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
	struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
	u32 credits;

	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		return;

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buffer->rb_max_requests)
		credits = buffer->rb_max_requests;

	atomic_set(&buffer->rb_credits, credits);
}

/**
 * rpcrdma_receive_wc - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_receive_wc(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);

	rpcrdma_update_granted_credits(rep);

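	/* Defer RPC reply processing to process context: the receive
	 * workqueue runs rpcrdma_receive_worker(), which invokes the
	 * reply handler, keeping the soft-IRQ completion path short.
	 */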
out_schedule:
	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rep->rr_len = RPCRDMA_BAD_LEN;
	goto out_schedule;
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		atomic_set(&xprt->rx_buf.rb_credits, 1);
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	if (id) {
		module_put(id->device->owner);
		rdma_destroy_id(id);
	}
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC: %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Unsupported memory registration mode: %d\n",
		       memreg);
		rc = -EINVAL;
		goto out3;
	}

	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_qp_wr;
	int rc;

	if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) {
		dprintk("RPC: %s: insufficient sge's available\n",
			__func__);
		return -ENOMEM;
	}

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC: %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
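	/* Leave room for the backchannel work requests, plus one extra
	 * WR used for the drain completion posted at disconnect time.
	 */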
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */
	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
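	/* Sends are normally posted unsignaled; requesting a completion
	 * only every rep_cqinit posts (about half the send queue) keeps
	 * interrupt load down while still reaping Send WRs before the
	 * queue can fill. The countdown presumably lives in the
	 * INIT_CQCOUNT/DECR_CQCOUNT helpers in xprt_rdma.h.
	 */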
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* always signal? */
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
					ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC: %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC: %s: can't reconnect on "
				"different device!\n", __func__);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		old = ia->ri_id;
		ia->ri_id = id;

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);
	} else {
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		struct rpcrdma_xprt *r_xprt;
		unsigned int extras;

		dprintk("RPC: %s: connected\n", __func__);

		r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		extras = r_xprt->rx_buf.rb_bc_srv_max_requests;

		if (extras) {
			rc = rpcrdma_ep_post_extra_recv(r_xprt, extras);
			if (rc) {
				pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n",
					__func__, rc);
				rc = 0;
			}
		}
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mw *mw;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mw = list_first_entry(&buf->rb_stale_mrs,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
		mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	list_add(&mw->mw_list, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

static void
rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

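	/* MRs are created in batches of 32. rpcrdma_mr_refresh_worker()
	 * below re-enters this helper to grow the pool on demand; the
	 * trigger for that refresh lives outside this file.
	 */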
	for (count = 0; count < 32; count++) {
		struct rpcrdma_mw *mw;
		int rc;

		mw = kzalloc(sizeof(*mw), GFP_KERNEL);
		if (!mw)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mw);
		if (rc) {
			kfree(mw);
			break;
		}

		mw->mw_xprt = r_xprt;

		list_add(&mw->mw_list, &free);
		list_add(&mw->mw_all, &all);
	}

	spin_lock(&buf->rb_mwlock);
	list_splice(&free, &buf->rb_mws);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mwlock);

	dprintk("RPC: %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_create_mrs(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&req->rl_free);
	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_cqe.done = rpcrdma_wc_send;
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	return req;
}

struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_cqe.done = rpcrdma_receive_wc;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	atomic_set(&buf->rb_credits, 1);
	spin_lock_init(&buf->rb_mwlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_create_mrs(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		req->rl_backchannel = false;
		list_add(&req->rl_free, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
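	/* Provision extra receive buffers beyond rb_max_requests so that
	 * backchannel (server-initiated) calls have somewhere to land.
	 */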
	for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		list_add(&rep->rr_list, &buf->rb_recv_bufs);
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_free);
	list_del(&req->rl_free);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mw *mw;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mwlock);
	while (!list_empty(&buf->rb_all)) {
		mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&mw->mw_all);

		spin_unlock(&buf->rb_mwlock);
		ia->ri_ops->ro_release_mr(mw);
		count++;
		spin_lock(&buf->rb_mwlock);
	}
	spin_unlock(&buf->rb_mwlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC: %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);

	cancel_delayed_work_sync(&buf->rb_recovery_worker);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(ia, rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(ia, req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_destroy_mrs(buf);
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1040 | } |
| 1041 | |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1042 | struct rpcrdma_mw * |
| 1043 | rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1044 | { |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1045 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 1046 | struct rpcrdma_mw *mw = NULL; |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1047 | |
Chuck Lever | 58d1dcf | 2015-05-26 11:53:13 -0400 | [diff] [blame] | 1048 | spin_lock(&buf->rb_mwlock); |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1049 | if (!list_empty(&buf->rb_mws)) { |
| 1050 | mw = list_first_entry(&buf->rb_mws, |
| 1051 | struct rpcrdma_mw, mw_list); |
| 1052 | list_del_init(&mw->mw_list); |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1053 | } |
Chuck Lever | 58d1dcf | 2015-05-26 11:53:13 -0400 | [diff] [blame] | 1054 | spin_unlock(&buf->rb_mwlock); |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1055 | |
| 1056 | if (!mw) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1057 | goto out_nomws; |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1058 | return mw; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1059 | |
| 1060 | out_nomws: |
| 1061 | dprintk("RPC: %s: no MWs available\n", __func__); |
| 1062 | schedule_delayed_work(&buf->rb_refresh_worker, 0); |
| 1063 | |
| 1064 | /* Allow the reply handler and refresh worker to run */ |
| 1065 | cond_resched(); |
| 1066 | |
| 1067 | return NULL; |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1068 | } |
| 1069 | |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1070 | void |
| 1071 | rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1072 | { |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1073 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1074 | |
Chuck Lever | 58d1dcf | 2015-05-26 11:53:13 -0400 | [diff] [blame] | 1075 | spin_lock(&buf->rb_mwlock); |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1076 | list_add_tail(&mw->mw_list, &buf->rb_mws); |
Chuck Lever | 58d1dcf | 2015-05-26 11:53:13 -0400 | [diff] [blame] | 1077 | spin_unlock(&buf->rb_mwlock); |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1078 | } |
| 1079 | |
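A minimal caller-side sketch (not part of verbs.c) of the MW pool helpers above; the wrapper function and the registration step are hypothetical, only rpcrdma_get_mw() and rpcrdma_put_mw() are taken from this file:

	static int example_map_one_chunk(struct rpcrdma_xprt *r_xprt)
	{
		struct rpcrdma_mw *mw;

		/* borrow an MW from rb_mws; NULL means the pool is exhausted
		 * and a refresh has been scheduled */
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOBUFS;

		/* ... register one RDMA segment with mw ... */

		/* return the MW to the pool once the segment is invalidated */
		rpcrdma_put_mw(r_xprt, mw);
		return 0;
	}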
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1080 | static struct rpcrdma_rep * |
| 1081 | rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers) |
| 1082 | { |
| 1083 | /* If an RPC previously completed without a reply (say, due to a |
| 1084 | * credential problem or a soft timeout), then hold off |
| 1085 | * on supplying more Receive buffers until the number of new |
| 1086 | * pending RPCs catches up to the number of posted Receives. |
| 1087 | */ |
| 1088 | if (unlikely(buffers->rb_send_count < buffers->rb_recv_count)) |
| 1089 | return NULL; |
| 1090 | |
| 1091 | if (unlikely(list_empty(&buffers->rb_recv_bufs))) |
| 1092 | return NULL; |
| 1093 | buffers->rb_recv_count++; |
| 1094 | return rpcrdma_buffer_get_rep_locked(buffers); |
| 1095 | } |
| 1096 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1097 | /* |
| 1098 | * Get a set of request/reply buffers. |
Chuck Lever | 78d506e | 2016-09-06 11:22:49 -0400 | [diff] [blame] | 1099 | * |
| 1100 | * Reply buffer (if available) is attached to send buffer upon return. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1101 | */ |
| 1102 | struct rpcrdma_req * |
| 1103 | rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) |
| 1104 | { |
| 1105 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1106 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1107 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1108 | if (list_empty(&buffers->rb_send_bufs)) |
| 1109 | goto out_reqbuf; |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1110 | buffers->rb_send_count++; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1111 | req = rpcrdma_buffer_get_req_locked(buffers); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1112 | req->rl_reply = rpcrdma_buffer_get_rep(buffers); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1113 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1114 | return req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1115 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1116 | out_reqbuf: |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1117 | spin_unlock(&buffers->rb_lock); |
Chuck Lever | 78d506e | 2016-09-06 11:22:49 -0400 | [diff] [blame] | 1118 | pr_warn("RPC: %s: out of request buffers\n", __func__); |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1119 | return NULL; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1120 | } |
| 1121 | |
| 1122 | /* |
| 1123 | * Put request/reply buffers back into pool. |
| 1124 | * Decrement the send/recv counts and return the buffers to their free lists. |
| 1125 | */ |
| 1126 | void |
| 1127 | rpcrdma_buffer_put(struct rpcrdma_req *req) |
| 1128 | { |
| 1129 | struct rpcrdma_buffer *buffers = req->rl_buffer; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1130 | struct rpcrdma_rep *rep = req->rl_reply; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1131 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1132 | req->rl_niovs = 0; |
| 1133 | req->rl_reply = NULL; |
| 1134 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1135 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1136 | buffers->rb_send_count--; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1137 | list_add_tail(&req->rl_free, &buffers->rb_send_bufs); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1138 | if (rep) { |
| 1139 | buffers->rb_recv_count--; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1140 | list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1141 | } |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1142 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1143 | } |
| 1144 | |
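A minimal sketch (not part of verbs.c) of the per-RPC lifecycle described by the two comments above; the calling function and its error value are hypothetical, while rpcrdma_buffer_get() and rpcrdma_buffer_put() are the real entry points:

	static int example_send_one_rpc(struct rpcrdma_xprt *r_xprt)
	{
		struct rpcrdma_req *req;

		/* take a send buffer; a reply buffer is attached when one is free */
		req = rpcrdma_buffer_get(&r_xprt->rx_buf);
		if (!req)
			return -ENOMEM;

		/* ... marshal the RPC and post it with rpcrdma_ep_post() ... */

		/* when the RPC terminates, both buffers go back to the pool */
		rpcrdma_buffer_put(req);
		return 0;
	}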
| 1145 | /* |
| 1146 | * Get a reply buffer from the pool and attach it to a request. |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1147 | * This happens when recovering from disconnect. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1148 | */ |
| 1149 | void |
| 1150 | rpcrdma_recv_buffer_get(struct rpcrdma_req *req) |
| 1151 | { |
| 1152 | struct rpcrdma_buffer *buffers = req->rl_buffer; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1153 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1154 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1155 | req->rl_reply = rpcrdma_buffer_get_rep(buffers); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1156 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1157 | } |
| 1158 | |
| 1159 | /* |
| 1160 | * Put reply buffers back into pool when not attached to |
| 1161 | * request. This happens in error conditions. |
| 1162 | */ |
| 1163 | void |
| 1164 | rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) |
| 1165 | { |
Chuck Lever | fed171b | 2015-05-26 11:51:37 -0400 | [diff] [blame] | 1166 | struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1167 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1168 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame^] | 1169 | buffers->rb_recv_count--; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1170 | list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1171 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1172 | } |
| 1173 | |
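A minimal sketch (not part of verbs.c) of how the two helpers above might be used; the wrapper functions and their surrounding context are hypothetical:

	static void example_reattach_on_reconnect(struct rpcrdma_req *req)
	{
		/* give a retransmitted request a fresh reply buffer */
		rpcrdma_recv_buffer_get(req);
	}

	static void example_discard_orphaned_rep(struct rpcrdma_rep *rep)
	{
		/* an incoming rep with no matching request returns to the pool */
		rpcrdma_recv_buffer_put(rep);
	}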
| 1174 | /* |
| 1175 | * Wrappers for internal-use kmalloc memory registration, used by buffer code. |
| 1176 | */ |
| 1177 | |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1178 | /** |
| 1179 | * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers |
| 1180 | * @ia: controlling rpcrdma_ia |
| 1181 | * @size: size of buffer to be allocated, in bytes |
| 1182 | * @flags: GFP flags |
| 1183 | * |
| 1184 | * Returns pointer to private header of an area of internally |
| 1185 | * registered memory, or an ERR_PTR. The registered buffer follows |
| 1186 | * the end of the private header. |
| 1187 | * |
| 1188 | * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for |
| 1189 | * receiving the payload of RDMA RECV operations. regbufs are not |
| 1190 | * used for RDMA READ/WRITE operations, thus are registered only for |
| 1191 | * LOCAL access. |
| 1192 | */ |
| 1193 | struct rpcrdma_regbuf * |
| 1194 | rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) |
| 1195 | { |
| 1196 | struct rpcrdma_regbuf *rb; |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1197 | struct ib_sge *iov; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1198 | |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1199 | rb = kmalloc(sizeof(*rb) + size, flags); |
| 1200 | if (rb == NULL) |
| 1201 | goto out; |
| 1202 | |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1203 | iov = &rb->rg_iov; |
| 1204 | iov->addr = ib_dma_map_single(ia->ri_device, |
| 1205 | (void *)rb->rg_base, size, |
| 1206 | DMA_BIDIRECTIONAL); |
| 1207 | if (ib_dma_mapping_error(ia->ri_device, iov->addr)) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1208 | goto out_free; |
| 1209 | |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1210 | iov->length = size; |
Chuck Lever | bb6c96d | 2015-09-24 10:34:21 +0300 | [diff] [blame] | 1211 | iov->lkey = ia->ri_pd->local_dma_lkey; |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1212 | rb->rg_size = size; |
| 1213 | rb->rg_owner = NULL; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1214 | return rb; |
| 1215 | |
| 1216 | out_free: |
| 1217 | kfree(rb); |
| 1218 | out: |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1219 | return ERR_PTR(-ENOMEM); |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1220 | } |
| 1221 | |
| 1222 | /** |
| 1223 | * rpcrdma_free_regbuf - deregister and free registered buffer |
| 1224 | * @ia: controlling rpcrdma_ia |
| 1225 | * @rb: regbuf to be deregistered and freed |
| 1226 | */ |
| 1227 | void |
| 1228 | rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) |
| 1229 | { |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1230 | struct ib_sge *iov; |
| 1231 | |
| 1232 | if (!rb) |
| 1233 | return; |
| 1234 | |
| 1235 | iov = &rb->rg_iov; |
| 1236 | ib_dma_unmap_single(ia->ri_device, |
| 1237 | iov->addr, iov->length, DMA_BIDIRECTIONAL); |
| 1238 | kfree(rb); |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1239 | } |
| 1240 | |
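A minimal sketch (not part of verbs.c) pairing the two regbuf calls documented above, roughly as the request setup and teardown paths use them; the wrapper functions are hypothetical, while the rl_sendbuf field follows its usage elsewhere in this file:

	static int example_setup_sendbuf(struct rpcrdma_ia *ia,
					 struct rpcrdma_req *req, size_t size)
	{
		struct rpcrdma_regbuf *rb;

		/* kmalloc + DMA-map a buffer registered for LOCAL access only */
		rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
		if (IS_ERR(rb))
			return PTR_ERR(rb);
		req->rl_sendbuf = rb;
		return 0;
	}

	static void example_teardown_sendbuf(struct rpcrdma_ia *ia,
					     struct rpcrdma_req *req)
	{
		/* unmap and free; rpcrdma_free_regbuf() tolerates a NULL rb */
		rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	}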
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1241 | /* |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1242 | * Prepost any receive buffer, then post send. |
| 1243 | * |
| 1244 | * Receive buffer is donated to hardware, reclaimed upon recv completion. |
| 1245 | */ |
| 1246 | int |
| 1247 | rpcrdma_ep_post(struct rpcrdma_ia *ia, |
| 1248 | struct rpcrdma_ep *ep, |
| 1249 | struct rpcrdma_req *req) |
| 1250 | { |
Chuck Lever | b3221d6 | 2015-08-03 13:03:39 -0400 | [diff] [blame] | 1251 | struct ib_device *device = ia->ri_device; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1252 | struct ib_send_wr send_wr, *send_wr_fail; |
| 1253 | struct rpcrdma_rep *rep = req->rl_reply; |
Chuck Lever | b3221d6 | 2015-08-03 13:03:39 -0400 | [diff] [blame] | 1254 | struct ib_sge *iov = req->rl_send_iov; |
| 1255 | int i, rc; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1256 | |
| 1257 | if (rep) { |
| 1258 | rc = rpcrdma_ep_post_recv(ia, ep, rep); |
| 1259 | if (rc) |
Chuck Lever | 7a89f9c | 2016-06-29 13:53:43 -0400 | [diff] [blame] | 1260 | return rc; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1261 | req->rl_reply = NULL; |
| 1262 | } |
| 1263 | |
| 1264 | send_wr.next = NULL; |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 1265 | send_wr.wr_cqe = &req->rl_cqe; |
Chuck Lever | b3221d6 | 2015-08-03 13:03:39 -0400 | [diff] [blame] | 1266 | send_wr.sg_list = iov; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1267 | send_wr.num_sge = req->rl_niovs; |
| 1268 | send_wr.opcode = IB_WR_SEND; |
Chuck Lever | b3221d6 | 2015-08-03 13:03:39 -0400 | [diff] [blame] | 1269 | |
| 1270 | for (i = 0; i < send_wr.num_sge; i++) |
| 1271 | ib_dma_sync_single_for_device(device, iov[i].addr, |
| 1272 | iov[i].length, DMA_TO_DEVICE); |
| 1273 | dprintk("RPC: %s: posting %d s/g entries\n", |
| 1274 | __func__, send_wr.num_sge); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1275 | |
| 1276 | if (DECR_CQCOUNT(ep) > 0) |
| 1277 | send_wr.send_flags = 0; |
| 1278 | else { /* Provider must take a send completion every now and then */ |
| 1279 | INIT_CQCOUNT(ep); |
| 1280 | send_wr.send_flags = IB_SEND_SIGNALED; |
| 1281 | } |
| 1282 | |
| 1283 | rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); |
| 1284 | if (rc) |
Chuck Lever | 7a89f9c | 2016-06-29 13:53:43 -0400 | [diff] [blame] | 1285 | goto out_postsend_err; |
| 1286 | return 0; |
| 1287 | |
| 1288 | out_postsend_err: |
| 1289 | pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc); |
| 1290 | return -ENOTCONN; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1291 | } |
| 1292 | |
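A minimal caller-side sketch (not part of verbs.c) of rpcrdma_ep_post(); the wrapper is hypothetical, but the fields it mentions (rl_send_iov, rl_niovs, rl_reply) are the ones the function above consumes:

	static int example_post_marshaled_req(struct rpcrdma_xprt *r_xprt,
					      struct rpcrdma_req *req)
	{
		/* rl_send_iov[]/rl_niovs must already describe the marshaled
		 * RPC; a non-NULL rl_reply is preposted as the matching
		 * Receive before the Send is posted */
		return rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req);
	}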
| 1293 | /* |
| 1294 | * (Re)post a receive buffer. |
| 1295 | */ |
| 1296 | int |
| 1297 | rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, |
| 1298 | struct rpcrdma_ep *ep, |
| 1299 | struct rpcrdma_rep *rep) |
| 1300 | { |
| 1301 | struct ib_recv_wr recv_wr, *recv_wr_fail; |
| 1302 | int rc; |
| 1303 | |
| 1304 | recv_wr.next = NULL; |
Chuck Lever | 552bf22 | 2016-03-04 11:28:36 -0500 | [diff] [blame] | 1305 | recv_wr.wr_cqe = &rep->rr_cqe; |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1306 | recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1307 | recv_wr.num_sge = 1; |
| 1308 | |
Chuck Lever | 89e0d112 | 2015-05-26 11:51:56 -0400 | [diff] [blame] | 1309 | ib_dma_sync_single_for_cpu(ia->ri_device, |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1310 | rdmab_addr(rep->rr_rdmabuf), |
| 1311 | rdmab_length(rep->rr_rdmabuf), |
| 1312 | DMA_BIDIRECTIONAL); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1313 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1314 | rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1315 | if (rc) |
Chuck Lever | 7a89f9c | 2016-06-29 13:53:43 -0400 | [diff] [blame] | 1316 | goto out_postrecv; |
| 1317 | return 0; |
| 1318 | |
| 1319 | out_postrecv: |
| 1320 | pr_err("rpcrdma: ib_post_recv returned %i\n", rc); |
| 1321 | return -ENOTCONN; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1322 | } |
Chuck Lever | 43e9598 | 2014-07-29 17:23:34 -0400 | [diff] [blame] | 1323 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1324 | /** |
| 1325 | * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests |
| 1326 | * @r_xprt: transport associated with these backchannel resources |
| 1327 | * @count: minimum number of incoming requests expected |
| 1328 | * |
| 1329 | * Returns zero if all requested buffers were posted, or a negative errno. |
| 1330 | */ |
| 1331 | int |
| 1332 | rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count) |
| 1333 | { |
| 1334 | struct rpcrdma_buffer *buffers = &r_xprt->rx_buf; |
| 1335 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
| 1336 | struct rpcrdma_ep *ep = &r_xprt->rx_ep; |
| 1337 | struct rpcrdma_rep *rep; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1338 | int rc; |
| 1339 | |
| 1340 | while (count--) { |
Chuck Lever | 9b06688 | 2015-12-16 17:22:06 -0500 | [diff] [blame] | 1341 | spin_lock(&buffers->rb_lock); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1342 | if (list_empty(&buffers->rb_recv_bufs)) |
| 1343 | goto out_reqbuf; |
| 1344 | rep = rpcrdma_buffer_get_rep_locked(buffers); |
Chuck Lever | 9b06688 | 2015-12-16 17:22:06 -0500 | [diff] [blame] | 1345 | spin_unlock(&buffers->rb_lock); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1346 | |
| 1347 | rc = rpcrdma_ep_post_recv(ia, ep, rep); |
| 1348 | if (rc) |
| 1349 | goto out_rc; |
| 1350 | } |
| 1351 | |
| 1352 | return 0; |
| 1353 | |
| 1354 | out_reqbuf: |
Chuck Lever | 9b06688 | 2015-12-16 17:22:06 -0500 | [diff] [blame] | 1355 | spin_unlock(&buffers->rb_lock); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1356 | pr_warn("%s: no extra receive buffers\n", __func__); |
| 1357 | return -ENOMEM; |
| 1358 | |
| 1359 | out_rc: |
| 1360 | rpcrdma_recv_buffer_put(rep); |
| 1361 | return rc; |
| 1362 | } |