// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

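/* Asynchronous QP event handler. Report the event, and if the
 * endpoint is currently connected, mark it failed and wake up
 * waiters on rep_connect_wait.
 */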
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 111 | static void |
| 112 | rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context) |
| 113 | { |
| 114 | struct rpcrdma_ep *ep = context; |
Chuck Lever | 643cf32 | 2017-12-20 16:31:45 -0500 | [diff] [blame] | 115 | struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt, |
| 116 | rx_ep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 117 | |
Chuck Lever | 643cf32 | 2017-12-20 16:31:45 -0500 | [diff] [blame] | 118 | trace_xprtrdma_qp_error(r_xprt, event); |
Chuck Lever | 2f6922c | 2016-11-29 10:53:21 -0500 | [diff] [blame] | 119 | pr_err("rpcrdma: %s on device %s ep %p\n", |
| 120 | ib_event_msg(event->event), event->device->name, context); |
| 121 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 122 | if (ep->rep_connected == 1) { |
| 123 | ep->rep_connected = -EIO; |
Chuck Lever | afadc46 | 2015-01-21 11:03:11 -0500 | [diff] [blame] | 124 | rpcrdma_conn_func(ep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 125 | wake_up_all(&ep->rep_connect_wait); |
| 126 | } |
| 127 | } |
| 128 | |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 129 | /** |
| 130 | * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC |
| 131 | * @cq: completion queue (ignored) |
| 132 | * @wc: completed WR |
| 133 | * |
Chuck Lever | 4220a07 | 2015-10-24 17:26:45 -0400 | [diff] [blame] | 134 | */ |
| 135 | static void |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 136 | rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 137 | { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 138 | struct ib_cqe *cqe = wc->wr_cqe; |
| 139 | struct rpcrdma_sendctx *sc = |
| 140 | container_of(cqe, struct rpcrdma_sendctx, sc_cqe); |
| 141 | |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 142 | /* WARNING: Only wr_cqe and status are reliable at this point */ |
Chuck Lever | ab03eff | 2017-12-20 16:30:40 -0500 | [diff] [blame] | 143 | trace_xprtrdma_wc_send(sc, wc); |
Chuck Lever | 2fa8f88 | 2016-03-04 11:28:53 -0500 | [diff] [blame] | 144 | if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) |
| 145 | pr_err("rpcrdma: Send: %s (%u/0x%x)\n", |
| 146 | ib_wc_status_msg(wc->status), |
| 147 | wc->status, wc->vendor_err); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 148 | |
| 149 | rpcrdma_sendctx_put_locked(sc); |
Chuck Lever | fc66448 | 2014-05-28 10:33:25 -0400 | [diff] [blame] | 150 | } |
| 151 | |
Chuck Lever | 552bf22 | 2016-03-04 11:28:36 -0500 | [diff] [blame] | 152 | /** |
Chuck Lever | 1519e96 | 2016-09-15 10:57:49 -0400 | [diff] [blame] | 153 | * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC |
Chuck Lever | 552bf22 | 2016-03-04 11:28:36 -0500 | [diff] [blame] | 154 | * @cq: completion queue (ignored) |
| 155 | * @wc: completed WR |
| 156 | * |
| 157 | */ |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 158 | static void |
Chuck Lever | 1519e96 | 2016-09-15 10:57:49 -0400 | [diff] [blame] | 159 | rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) |
Chuck Lever | fc66448 | 2014-05-28 10:33:25 -0400 | [diff] [blame] | 160 | { |
Chuck Lever | 552bf22 | 2016-03-04 11:28:36 -0500 | [diff] [blame] | 161 | struct ib_cqe *cqe = wc->wr_cqe; |
| 162 | struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep, |
| 163 | rr_cqe); |
Chuck Lever | fc66448 | 2014-05-28 10:33:25 -0400 | [diff] [blame] | 164 | |
	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

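/* Parse the CM private message received from the server. When the
 * peer sends a valid RPC-over-RDMA CMP message, lower this
 * transport's inline thresholds to the advertised buffer sizes.
 */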
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC: %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		++xprt->connect_cookie;
		ep->rep_connected = -ECONNABORTED;
connected:
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: %s:%s on %s/%s: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_device->name, ia->ri_ops->ro_displayname,
		rdma_event_msg(event->event));
	return 0;
}

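/* Create a new rdma_cm_id for this transport, then synchronously
 * resolve the server's address and a route to it. The CM event
 * handler completes ia->ri_done as each resolution step finishes.
 */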
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

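/* Reconnect an existing transport by creating a fresh rdma_cm_id
 * and QP. The device must not have changed; the existing PD, MRs,
 * and DMA mappings are reused.
 */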
static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC: %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC: %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
					       rx_ep), rc);

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
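/* For example, if rb_sc_last is 3 the ring has four slots. The
 * consumer advances rb_sc_head only while the next slot differs from
 * rb_sc_tail: with head == 2 and tail == 3 the next slot equals the
 * tail, so rpcrdma_sendctx_get_locked() returns NULL until a Send
 * completion moves rb_sc_tail forward again.
 */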

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

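/* Allocate one send context, sized to hold this device's maximum
 * Send SGE array, and point its Send WR at the embedded cqe and
 * SGE list.
 */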
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

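/* Build the array of send contexts that backs the circular queue.
 * The array is sized to rb_max_requests plus room for backchannel
 * requests.
 */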
| 862 | static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) |
| 863 | { |
| 864 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 865 | struct rpcrdma_sendctx *sc; |
| 866 | unsigned long i; |
| 867 | |
| 868 | /* Maximum number of concurrent outstanding Send WRs. Capping |
| 869 | * the circular queue size stops Send Queue overflow by causing |
| 870 | * the ->send_request call to fail temporarily before too many |
| 871 | * Sends are posted. |
| 872 | */ |
| 873 | i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; |
| 874 | dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i); |
| 875 | buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); |
| 876 | if (!buf->rb_sc_ctxs) |
| 877 | return -ENOMEM; |
| 878 | |
| 879 | buf->rb_sc_last = i - 1; |
| 880 | for (i = 0; i <= buf->rb_sc_last; i++) { |
| 881 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); |
| 882 | if (!sc) |
| 883 | goto out_destroy; |
| 884 | |
| 885 | sc->sc_xprt = r_xprt; |
| 886 | buf->rb_sc_ctxs[i] = sc; |
| 887 | } |
Chuck Lever | 2fad659 | 2018-05-04 15:35:57 -0400 | [diff] [blame] | 888 | buf->rb_flags = 0; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 889 | |
| 890 | return 0; |
| 891 | |
| 892 | out_destroy: |
| 893 | rpcrdma_sendctxs_destroy(buf); |
| 894 | return -ENOMEM; |
| 895 | } |
| 896 | |
| 897 | /* The sendctx queue is not guaranteed to have a size that is a |
| 898 | * power of two, thus the helpers in circ_buf.h cannot be used. |
| 899 | * The other option is to use modulus (%), which can be expensive. |
| 900 | */ |
| 901 | static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf, |
| 902 | unsigned long item) |
| 903 | { |
| 904 | return likely(item < buf->rb_sc_last) ? item + 1 : 0; |
| 905 | } |
| 906 | |
| 907 | /** |
| 908 | * rpcrdma_sendctx_get_locked - Acquire a send context |
| 909 | * @buf: transport buffers from which to acquire an unused context |
| 910 | * |
| 911 | * Returns pointer to a free send completion context; or NULL if |
| 912 | * the queue is empty. |
| 913 | * |
| 914 | * Usage: Called to acquire an SGE array before preparing a Send WR. |
| 915 | * |
| 916 | * The caller serializes calls to this function (per rpcrdma_buffer), |
| 917 | * and provides an effective memory barrier that flushes the new value |
| 918 | * of rb_sc_head. |
| 919 | */ |
| 920 | struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf) |
| 921 | { |
| 922 | struct rpcrdma_xprt *r_xprt; |
| 923 | struct rpcrdma_sendctx *sc; |
| 924 | unsigned long next_head; |
| 925 | |
| 926 | next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); |
| 927 | |
| 928 | if (next_head == READ_ONCE(buf->rb_sc_tail)) |
| 929 | goto out_emptyq; |
| 930 | |
| 931 | /* ORDER: item must be accessed _before_ head is updated */ |
| 932 | sc = buf->rb_sc_ctxs[next_head]; |
| 933 | |
| 934 | /* Releasing the lock in the caller acts as a memory |
| 935 | * barrier that flushes rb_sc_head. |
| 936 | */ |
| 937 | buf->rb_sc_head = next_head; |
| 938 | |
| 939 | return sc; |
| 940 | |
| 941 | out_emptyq: |
| 942 | /* The queue is "empty" if there have not been enough Send |
| 943 | * completions recently. This is a sign the Send Queue is |
| 944 | * backing up. Cause the caller to pause and try again. |
| 945 | */ |
Chuck Lever | 2fad659 | 2018-05-04 15:35:57 -0400 | [diff] [blame] | 946 | set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 947 | r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf); |
| 948 | r_xprt->rx_stats.empty_sendctx_q++; |
| 949 | return NULL; |
| 950 | } |
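/* Illustrative caller pattern (a sketch, not copied from a caller):
 *
 *	sc = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
 *	if (!sc)
 *		return -EAGAIN;		// queue empty: back off, retry
 *	req->rl_sendctx = sc;
 *	// fill sc->sc_sges, set sc->sc_wr.num_sge, then post the Send
 *
 * The transport's send path provides the serialization this function
 * relies on.
 */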
| 951 | |
| 952 | /** |
| 953 | * rpcrdma_sendctx_put_locked - Release a send context |
| 954 | * @sc: send context to release |
| 955 | * |
| 956 | * Usage: Called from Send completion to return a sendctx
| 957 | * to the queue. |
| 958 | * |
| 959 | * The caller serializes calls to this function (per rpcrdma_buffer). |
| 960 | */ |
Chuck Lever | efd81e9 | 2018-05-04 15:35:41 -0400 | [diff] [blame] | 961 | static void |
| 962 | rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc) |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 963 | { |
| 964 | struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf; |
| 965 | unsigned long next_tail; |
| 966 | |
| 967 | /* Unmap SGEs of previously completed but unsignaled
| 968 | * Sends by walking up the queue until @sc is found.
| 969 | */ |
| 970 | next_tail = buf->rb_sc_tail; |
| 971 | do { |
| 972 | next_tail = rpcrdma_sendctx_next(buf, next_tail); |
| 973 | |
| 974 | /* ORDER: item must be accessed _before_ tail is updated */ |
| 975 | rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]); |
| 976 | |
| 977 | } while (buf->rb_sc_ctxs[next_tail] != sc); |
| 978 | |
| 979 | /* Paired with READ_ONCE */ |
| 980 | smp_store_release(&buf->rb_sc_tail, next_tail); |
Chuck Lever | 2fad659 | 2018-05-04 15:35:57 -0400 | [diff] [blame] | 981 | |
| 982 | if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) { |
| 983 | smp_mb__after_atomic(); |
| 984 | xprt_write_space(&sc->sc_xprt->rx_xprt); |
| 985 | } |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 986 | } |
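/* Taken together, rpcrdma_sendctx_get_locked() and _put_locked() form
 * a single-producer/single-consumer ring: the send path advances
 * rb_sc_head, the Send completion handler advances rb_sc_tail, and
 * the smp_store_release()/READ_ONCE() pair orders the tail update
 * against the next rb_sc_tail read in the get path.
 */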
| 987 | |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 988 | static void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 989 | rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 990 | { |
| 991 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 992 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
| 993 | unsigned int count; |
| 994 | LIST_HEAD(free); |
| 995 | LIST_HEAD(all); |
| 996 | |
Chuck Lever | c421ece | 2018-10-01 14:25:20 -0400 | [diff] [blame] | 997 | for (count = 0; count < ia->ri_max_segs; count++) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 998 | struct rpcrdma_mr *mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 999 | int rc; |
| 1000 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1001 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
| 1002 | if (!mr) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1003 | break; |
| 1004 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1005 | rc = ia->ri_ops->ro_init_mr(ia, mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1006 | if (rc) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1007 | kfree(mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1008 | break; |
| 1009 | } |
| 1010 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1011 | mr->mr_xprt = r_xprt; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1012 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1013 | list_add(&mr->mr_list, &free); |
| 1014 | list_add(&mr->mr_all, &all); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1015 | } |
| 1016 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1017 | spin_lock(&buf->rb_mrlock); |
| 1018 | list_splice(&free, &buf->rb_mrs); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1019 | list_splice(&all, &buf->rb_all); |
| 1020 | r_xprt->rx_stats.mrs_allocated += count; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1021 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | 1c443eff | 2017-12-20 16:31:21 -0500 | [diff] [blame] | 1022 | trace_xprtrdma_createmrs(r_xprt, count); |
Chuck Lever | 9e679d5 | 2018-02-28 15:30:44 -0500 | [diff] [blame] | 1023 | |
| 1024 | xprt_write_space(&r_xprt->rx_xprt); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1025 | } |
| 1026 | |
| 1027 | static void |
| 1028 | rpcrdma_mr_refresh_worker(struct work_struct *work) |
| 1029 | { |
| 1030 | struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer, |
| 1031 | rb_refresh_worker.work); |
| 1032 | struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, |
| 1033 | rx_buf); |
| 1034 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1035 | rpcrdma_mrs_create(r_xprt); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1036 | } |
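/* The refresh worker is scheduled from rpcrdma_mr_get() when the MR
 * free list runs dry (see below), so allocating and initializing
 * replacement MRs happens outside the caller that found the list
 * empty.
 */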
| 1037 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1038 | struct rpcrdma_req * |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1039 | rpcrdma_create_req(struct rpcrdma_xprt *r_xprt) |
| 1040 | { |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1041 | struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1042 | struct rpcrdma_regbuf *rb; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1043 | struct rpcrdma_req *req; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1044 | |
Chuck Lever | 85275c8 | 2015-01-21 11:04:16 -0500 | [diff] [blame] | 1045 | req = kzalloc(sizeof(*req), GFP_KERNEL); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1046 | if (req == NULL) |
Chuck Lever | 85275c8 | 2015-01-21 11:04:16 -0500 | [diff] [blame] | 1047 | return ERR_PTR(-ENOMEM); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1048 | |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1049 | rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE, |
| 1050 | DMA_TO_DEVICE, GFP_KERNEL); |
| 1051 | if (IS_ERR(rb)) { |
| 1052 | kfree(req); |
| 1053 | return ERR_PTR(-ENOMEM); |
| 1054 | } |
| 1055 | req->rl_rdmabuf = rb; |
| 1056 | xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb)); |
| 1057 | req->rl_buffer = buffer; |
| 1058 | INIT_LIST_HEAD(&req->rl_registered); |
| 1059 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1060 | spin_lock(&buffer->rb_reqslock); |
| 1061 | list_add(&req->rl_all, &buffer->rb_allreqs); |
| 1062 | spin_unlock(&buffer->rb_reqslock); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1063 | return req; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1064 | } |
| 1065 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1066 | static int |
| 1067 | rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1068 | { |
| 1069 | struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; |
Chuck Lever | d698c4a | 2017-12-14 20:56:09 -0500 | [diff] [blame] | 1070 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1071 | struct rpcrdma_rep *rep; |
| 1072 | int rc; |
| 1073 | |
| 1074 | rc = -ENOMEM; |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1075 | rep = kzalloc(sizeof(*rep), GFP_KERNEL); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1076 | if (rep == NULL) |
| 1077 | goto out; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1078 | |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1079 | rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize, |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1080 | DMA_FROM_DEVICE, GFP_KERNEL); |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1081 | if (IS_ERR(rep->rr_rdmabuf)) { |
| 1082 | rc = PTR_ERR(rep->rr_rdmabuf); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1083 | goto out_free; |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1084 | } |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1085 | xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base, |
| 1086 | rdmab_length(rep->rr_rdmabuf)); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1087 | |
Chuck Lever | 1519e96 | 2016-09-15 10:57:49 -0400 | [diff] [blame] | 1088 | rep->rr_cqe.done = rpcrdma_wc_receive; |
Chuck Lever | fed171b | 2015-05-26 11:51:37 -0400 | [diff] [blame] | 1089 | rep->rr_rxprt = r_xprt; |
Chuck Lever | d8f532d | 2017-10-16 15:01:30 -0400 | [diff] [blame] | 1090 | INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion); |
Chuck Lever | 6ea8e71 | 2016-09-15 10:56:51 -0400 | [diff] [blame] | 1091 | rep->rr_recv_wr.next = NULL; |
| 1092 | rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; |
| 1093 | rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; |
| 1094 | rep->rr_recv_wr.num_sge = 1; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1095 | rep->rr_temp = temp; |
Chuck Lever | d698c4a | 2017-12-14 20:56:09 -0500 | [diff] [blame] | 1096 | |
| 1097 | spin_lock(&buf->rb_lock); |
| 1098 | list_add(&rep->rr_list, &buf->rb_recv_bufs); |
| 1099 | spin_unlock(&buf->rb_lock); |
| 1100 | return 0; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1101 | |
| 1102 | out_free: |
| 1103 | kfree(rep); |
| 1104 | out: |
Chuck Lever | d698c4a | 2017-12-14 20:56:09 -0500 | [diff] [blame] | 1105 | dprintk("RPC: %s: reply buffer %d alloc failed\n", |
| 1106 | __func__, rc); |
| 1107 | return rc; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1108 | } |
| 1109 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1110 | int |
Chuck Lever | ac920d0 | 2015-01-21 11:03:44 -0500 | [diff] [blame] | 1111 | rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1112 | { |
Chuck Lever | ac920d0 | 2015-01-21 11:03:44 -0500 | [diff] [blame] | 1113 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1114 | int i, rc; |
| 1115 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1116 | buf->rb_max_requests = r_xprt->rx_data.max_requests; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1117 | buf->rb_bc_srv_max_requests = 0; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1118 | spin_lock_init(&buf->rb_mrlock); |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 1119 | spin_lock_init(&buf->rb_lock); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1120 | INIT_LIST_HEAD(&buf->rb_mrs); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1121 | INIT_LIST_HEAD(&buf->rb_all); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1122 | INIT_DELAYED_WORK(&buf->rb_refresh_worker, |
| 1123 | rpcrdma_mr_refresh_worker); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1124 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1125 | rpcrdma_mrs_create(r_xprt); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1126 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1127 | INIT_LIST_HEAD(&buf->rb_send_bufs); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1128 | INIT_LIST_HEAD(&buf->rb_allreqs); |
| 1129 | spin_lock_init(&buf->rb_reqslock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1130 | for (i = 0; i < buf->rb_max_requests; i++) { |
| 1131 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1132 | |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1133 | req = rpcrdma_create_req(r_xprt); |
| 1134 | if (IS_ERR(req)) { |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1135 | dprintk("RPC: %s: request buffer %d alloc" |
| 1136 | " failed\n", __func__, i); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1137 | rc = PTR_ERR(req); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1138 | goto out; |
| 1139 | } |
Chuck Lever | a80d66c | 2017-06-08 11:52:12 -0400 | [diff] [blame] | 1140 | list_add(&req->rl_list, &buf->rb_send_bufs); |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1141 | } |
| 1142 | |
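	/* A new connection starts with a single credit; the server
	 * grants more credits in its first Reply.
	 */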
Chuck Lever | 8d4fb8f | 2018-07-28 10:46:47 -0400 | [diff] [blame] | 1143 | buf->rb_credits = 1; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1144 | buf->rb_posted_receives = 0; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1145 | INIT_LIST_HEAD(&buf->rb_recv_bufs); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1146 | |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1147 | rc = rpcrdma_sendctxs_create(r_xprt); |
| 1148 | if (rc) |
| 1149 | goto out; |
| 1150 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1151 | return 0; |
| 1152 | out: |
| 1153 | rpcrdma_buffer_destroy(buf); |
| 1154 | return rc; |
| 1155 | } |
| 1156 | |
Chuck Lever | 2e84522 | 2014-07-29 17:25:38 -0400 | [diff] [blame] | 1157 | static void |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1158 | rpcrdma_destroy_rep(struct rpcrdma_rep *rep) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1159 | { |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1160 | rpcrdma_free_regbuf(rep->rr_rdmabuf); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1161 | kfree(rep); |
| 1162 | } |
| 1163 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1164 | void |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1165 | rpcrdma_destroy_req(struct rpcrdma_req *req) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1166 | { |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1167 | rpcrdma_free_regbuf(req->rl_recvbuf); |
| 1168 | rpcrdma_free_regbuf(req->rl_sendbuf); |
| 1169 | rpcrdma_free_regbuf(req->rl_rdmabuf); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1170 | kfree(req); |
| 1171 | } |
| 1172 | |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1173 | static void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1174 | rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1175 | { |
| 1176 | struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, |
| 1177 | rx_buf); |
| 1178 | struct rpcrdma_ia *ia = rdmab_to_ia(buf); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1179 | struct rpcrdma_mr *mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1180 | unsigned int count; |
| 1181 | |
| 1182 | count = 0; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1183 | spin_lock(&buf->rb_mrlock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1184 | while (!list_empty(&buf->rb_all)) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1185 | mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all); |
| 1186 | list_del(&mr->mr_all); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1187 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1188 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | 054f155 | 2018-05-01 11:37:14 -0400 | [diff] [blame] | 1189 | |
| 1190 | /* Ensure MR is not on any rl_registered list */
| 1191 | if (!list_empty(&mr->mr_list)) |
| 1192 | list_del(&mr->mr_list); |
| 1193 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1194 | ia->ri_ops->ro_release_mr(mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1195 | count++; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1196 | spin_lock(&buf->rb_mrlock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1197 | } |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1198 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1199 | r_xprt->rx_stats.mrs_allocated = 0; |
| 1200 | |
| 1201 | dprintk("RPC: %s: released %u MRs\n", __func__, count); |
| 1202 | } |
| 1203 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1204 | void |
| 1205 | rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) |
| 1206 | { |
Chuck Lever | 9378b27 | 2017-04-11 13:22:29 -0400 | [diff] [blame] | 1207 | cancel_delayed_work_sync(&buf->rb_refresh_worker); |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 1208 | |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1209 | rpcrdma_sendctxs_destroy(buf); |
| 1210 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1211 | while (!list_empty(&buf->rb_recv_bufs)) { |
| 1212 | struct rpcrdma_rep *rep; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1213 | |
Chuck Lever | 9d95cd5 | 2018-05-04 15:35:36 -0400 | [diff] [blame] | 1214 | rep = list_first_entry(&buf->rb_recv_bufs, |
| 1215 | struct rpcrdma_rep, rr_list); |
| 1216 | list_del(&rep->rr_list); |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1217 | rpcrdma_destroy_rep(rep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1218 | } |
| 1219 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1220 | spin_lock(&buf->rb_reqslock); |
| 1221 | while (!list_empty(&buf->rb_allreqs)) { |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1222 | struct rpcrdma_req *req; |
Allen Andrews | 4034ba0 | 2014-05-28 10:32:09 -0400 | [diff] [blame] | 1223 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1224 | req = list_first_entry(&buf->rb_allreqs, |
| 1225 | struct rpcrdma_req, rl_all); |
| 1226 | list_del(&req->rl_all); |
| 1227 | |
| 1228 | spin_unlock(&buf->rb_reqslock); |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1229 | rpcrdma_destroy_req(req); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1230 | spin_lock(&buf->rb_reqslock); |
Chuck Lever | 9f9d802 | 2014-07-29 17:24:45 -0400 | [diff] [blame] | 1231 | } |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1232 | spin_unlock(&buf->rb_reqslock); |
Chuck Lever | 9f9d802 | 2014-07-29 17:24:45 -0400 | [diff] [blame] | 1233 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1234 | rpcrdma_mrs_destroy(buf); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1235 | } |
| 1236 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1237 | /** |
| 1238 | * rpcrdma_mr_get - Allocate an rpcrdma_mr object |
| 1239 | * @r_xprt: controlling transport |
| 1240 | * |
| 1241 | * Returns an initialized rpcrdma_mr or NULL if no free |
| 1242 | * rpcrdma_mr objects are available. |
| 1243 | */ |
| 1244 | struct rpcrdma_mr * |
| 1245 | rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1246 | { |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1247 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1248 | struct rpcrdma_mr *mr = NULL; |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1249 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1250 | spin_lock(&buf->rb_mrlock); |
| 1251 | if (!list_empty(&buf->rb_mrs)) |
| 1252 | mr = rpcrdma_mr_pop(&buf->rb_mrs); |
| 1253 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1254 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1255 | if (!mr) |
| 1256 | goto out_nomrs; |
| 1257 | return mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1258 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1259 | out_nomrs: |
Chuck Lever | 1c443eff | 2017-12-20 16:31:21 -0500 | [diff] [blame] | 1260 | trace_xprtrdma_nomrs(r_xprt); |
Chuck Lever | bebd031 | 2017-04-11 13:23:10 -0400 | [diff] [blame] | 1261 | if (r_xprt->rx_ep.rep_connected != -ENODEV) |
| 1262 | schedule_delayed_work(&buf->rb_refresh_worker, 0); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1263 | |
| 1264 | /* Allow the reply handler and refresh worker to run */ |
| 1265 | cond_resched(); |
| 1266 | |
| 1267 | return NULL; |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1268 | } |
| 1269 | |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1270 | static void |
| 1271 | __rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr) |
| 1272 | { |
| 1273 | spin_lock(&buf->rb_mrlock); |
| 1274 | rpcrdma_mr_push(mr, &buf->rb_mrs); |
| 1275 | spin_unlock(&buf->rb_mrlock); |
| 1276 | } |
| 1277 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1278 | /** |
| 1279 | * rpcrdma_mr_put - Release an rpcrdma_mr object |
| 1280 | * @mr: object to release |
| 1281 | * |
| 1282 | */ |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1283 | void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1284 | rpcrdma_mr_put(struct rpcrdma_mr *mr) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1285 | { |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1286 | __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr); |
| 1287 | } |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1288 | |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1289 | /** |
| 1290 | * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it |
| 1291 | * @mr: object to release |
| 1292 | * |
| 1293 | */ |
| 1294 | void |
| 1295 | rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr) |
| 1296 | { |
| 1297 | struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
| 1298 | |
Chuck Lever | d379eaa | 2018-10-01 14:25:30 -0400 | [diff] [blame] | 1299 | trace_xprtrdma_mr_unmap(mr); |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1300 | ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, |
| 1301 | mr->mr_sg, mr->mr_nents, mr->mr_dir); |
| 1302 | __rpcrdma_mr_put(&r_xprt->rx_buf, mr); |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1303 | } |
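/* rpcrdma_mr_put() is for MRs that no longer carry a DMA mapping;
 * rpcrdma_mr_unmap_and_put() additionally unmaps mr_sg before
 * returning the MR to the free list.
 */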
| 1304 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1305 | /** |
| 1306 | * rpcrdma_buffer_get - Get a request buffer |
| 1307 | * @buffers: Buffer pool from which to obtain a buffer |
Chuck Lever | 78d506e | 2016-09-06 11:22:49 -0400 | [diff] [blame] | 1308 | * |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1309 | * Returns a fresh rpcrdma_req, or NULL if none are available. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1310 | */ |
| 1311 | struct rpcrdma_req * |
| 1312 | rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) |
| 1313 | { |
| 1314 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1315 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1316 | spin_lock(&buffers->rb_lock); |
Chuck Lever | e68699c | 2018-05-04 15:35:31 -0400 | [diff] [blame] | 1317 | req = list_first_entry_or_null(&buffers->rb_send_bufs, |
| 1318 | struct rpcrdma_req, rl_list); |
| 1319 | if (req) |
| 1320 | list_del_init(&req->rl_list); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1321 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1322 | return req; |
| 1323 | } |
| 1324 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1325 | /** |
| 1326 | * rpcrdma_buffer_put - Put request/reply buffers back into pool |
| 1327 | * @req: object to return |
| 1328 | * |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1329 | */ |
| 1330 | void |
| 1331 | rpcrdma_buffer_put(struct rpcrdma_req *req) |
| 1332 | { |
| 1333 | struct rpcrdma_buffer *buffers = req->rl_buffer; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1334 | struct rpcrdma_rep *rep = req->rl_reply; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1335 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1336 | req->rl_reply = NULL; |
| 1337 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1338 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1339 | list_add(&req->rl_list, &buffers->rb_send_bufs); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame] | 1340 | if (rep) { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1341 | if (!rep->rr_temp) { |
| 1342 | list_add(&rep->rr_list, &buffers->rb_recv_bufs); |
| 1343 | rep = NULL; |
| 1344 | } |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame] | 1345 | } |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1346 | spin_unlock(&buffers->rb_lock); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1347 | if (rep) |
| 1348 | rpcrdma_destroy_rep(rep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1349 | } |
| 1350 | |
| 1351 | /* |
| 1352 | * Put reply buffers back into the pool when they are not attached
Chuck Lever | b45ccfd | 2014-05-28 10:32:34 -0400 | [diff] [blame] | 1353 | * to a request. This happens in error conditions.
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1354 | */ |
| 1355 | void |
| 1356 | rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) |
| 1357 | { |
Chuck Lever | fed171b | 2015-05-26 11:51:37 -0400 | [diff] [blame] | 1358 | struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1359 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1360 | if (!rep->rr_temp) { |
| 1361 | spin_lock(&buffers->rb_lock); |
| 1362 | list_add(&rep->rr_list, &buffers->rb_recv_bufs); |
| 1363 | spin_unlock(&buffers->rb_lock); |
| 1364 | } else { |
| 1365 | rpcrdma_destroy_rep(rep); |
| 1366 | } |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1367 | } |
| 1368 | |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1369 | /** |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1370 | * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1371 | * @size: size of buffer to be allocated, in bytes |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1372 | * @direction: direction of data movement |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1373 | * @flags: GFP flags |
| 1374 | * |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1375 | * Returns a pointer to a regbuf, a buffer that can be persistently
| 1376 | * DMA-mapped for I/O, or an ERR_PTR on allocation failure.
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1377 | * |
| 1378 | * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1379 | * receiving the payload of RDMA RECV operations. During Long Calls |
| 1380 | * or Replies they may be registered externally via ro_map. |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1381 | */ |
| 1382 | struct rpcrdma_regbuf * |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1383 | rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction, |
| 1384 | gfp_t flags) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1385 | { |
| 1386 | struct rpcrdma_regbuf *rb; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1387 | |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1388 | rb = kmalloc(sizeof(*rb) + size, flags); |
| 1389 | if (rb == NULL) |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1390 | return ERR_PTR(-ENOMEM); |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1391 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1392 | rb->rg_device = NULL; |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1393 | rb->rg_direction = direction; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1394 | rb->rg_iov.length = size; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1395 | |
| 1396 | return rb; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1397 | } |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1398 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1399 | /** |
| 1400 | * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
| 1401 | * @ia: controlling rpcrdma_ia |
| 1402 | * @rb: regbuf to be mapped |
| 1403 | */ |
| 1404 | bool |
| 1405 | __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) |
| 1406 | { |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1407 | struct ib_device *device = ia->ri_device; |
| 1408 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1409 | if (rb->rg_direction == DMA_NONE) |
| 1410 | return false; |
| 1411 | |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1412 | rb->rg_iov.addr = ib_dma_map_single(device, |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1413 | (void *)rb->rg_base, |
| 1414 | rdmab_length(rb), |
| 1415 | rb->rg_direction); |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1416 | if (ib_dma_mapping_error(device, rdmab_addr(rb))) |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1417 | return false; |
| 1418 | |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1419 | rb->rg_device = device; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1420 | rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey; |
| 1421 | return true; |
| 1422 | } |
| 1423 | |
| 1424 | static void |
| 1425 | rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) |
| 1426 | { |
Chuck Lever | e89e8d8f | 2018-01-31 12:34:13 -0500 | [diff] [blame] | 1427 | if (!rb) |
| 1428 | return; |
| 1429 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1430 | if (!rpcrdma_regbuf_is_mapped(rb)) |
| 1431 | return; |
| 1432 | |
| 1433 | ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), |
| 1434 | rdmab_length(rb), rb->rg_direction); |
| 1435 | rb->rg_device = NULL; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1436 | } |
| 1437 | |
| 1438 | /** |
| 1439 | * rpcrdma_free_regbuf - deregister and free registered buffer |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1440 | * @rb: regbuf to be deregistered and freed |
| 1441 | */ |
| 1442 | void |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1443 | rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1444 | { |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1445 | rpcrdma_dma_unmap_regbuf(rb); |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1446 | kfree(rb); |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1447 | } |
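/* Typical regbuf lifecycle: rpcrdma_alloc_regbuf() allocates the
 * buffer without mapping it, the first post that needs the buffer
 * DMA-maps it lazily via __rpcrdma_dma_map_regbuf(), and
 * rpcrdma_free_regbuf() unmaps and frees it at teardown.
 */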
| 1448 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1449 | /* |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1450 | * Prepost any receive buffer, then post send. |
| 1451 | * |
| 1452 | * Receive buffer is donated to hardware, reclaimed upon recv completion. |
| 1453 | */ |
| 1454 | int |
| 1455 | rpcrdma_ep_post(struct rpcrdma_ia *ia, |
| 1456 | struct rpcrdma_ep *ep, |
| 1457 | struct rpcrdma_req *req) |
| 1458 | { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1459 | struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr; |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 1460 | int rc; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1461 | |
Chuck Lever | 01bb35c | 2017-10-20 10:48:36 -0400 | [diff] [blame] | 1462 | if (!ep->rep_send_count || |
| 1463 | test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1464 | send_wr->send_flags |= IB_SEND_SIGNALED; |
| 1465 | ep->rep_send_count = ep->rep_send_batch; |
| 1466 | } else { |
| 1467 | send_wr->send_flags &= ~IB_SEND_SIGNALED; |
| 1468 | --ep->rep_send_count; |
| 1469 | } |
Chuck Lever | 7a89f9c | 2016-06-29 13:53:43 -0400 | [diff] [blame] | 1470 | |
Chuck Lever | f287762 | 2018-02-28 15:30:59 -0500 | [diff] [blame] | 1471 | rc = ia->ri_ops->ro_send(ia, req); |
Chuck Lever | ab03eff | 2017-12-20 16:30:40 -0500 | [diff] [blame] | 1472 | trace_xprtrdma_post_send(req, rc); |
| 1473 | if (rc) |
| 1474 | return -ENOTCONN; |
| 1475 | return 0; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1476 | } |
| 1477 | |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1478 | /** |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1479 | * rpcrdma_post_recvs - Maybe post some Receive buffers |
| 1480 | * @r_xprt: controlling transport |
| 1481 | * @temp: when true, allocate temp rpcrdma_rep objects |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1482 | * |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1483 | */ |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1484 | void |
| 1485 | rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1486 | { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1487 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 1488 | struct ib_recv_wr *wr, *bad_wr; |
| 1489 | int needed, count, rc; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1490 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1491 | needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1); |
| 1492 | if (buf->rb_posted_receives > needed) |
| 1493 | return; |
| 1494 | needed -= buf->rb_posted_receives; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1495 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1496 | count = 0; |
| 1497 | wr = NULL; |
| 1498 | while (needed) { |
| 1499 | struct rpcrdma_regbuf *rb; |
| 1500 | struct rpcrdma_rep *rep; |
| 1501 | |
| 1502 | spin_lock(&buf->rb_lock); |
| 1503 | rep = list_first_entry_or_null(&buf->rb_recv_bufs, |
| 1504 | struct rpcrdma_rep, rr_list); |
| 1505 | if (likely(rep)) |
| 1506 | list_del(&rep->rr_list); |
| 1507 | spin_unlock(&buf->rb_lock); |
| 1508 | if (!rep) { |
| 1509 | if (rpcrdma_create_rep(r_xprt, temp)) |
| 1510 | break; |
| 1511 | continue; |
| 1512 | } |
| 1513 | |
| 1514 | rb = rep->rr_rdmabuf; |
| 1515 | if (!rpcrdma_regbuf_is_mapped(rb)) { |
| 1516 | if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) { |
| 1517 | rpcrdma_recv_buffer_put(rep); |
| 1518 | break; |
| 1519 | } |
| 1520 | } |
| 1521 | |
| 1522 | trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe); |
| 1523 | rep->rr_recv_wr.next = wr; |
| 1524 | wr = &rep->rr_recv_wr; |
| 1525 | ++count; |
| 1526 | --needed; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1527 | } |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1528 | if (!count) |
| 1529 | return; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1530 | |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 1531 | rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, |
| 1532 | (const struct ib_recv_wr **)&bad_wr); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1533 | if (rc) { |
| 1534 | for (wr = bad_wr; wr; wr = wr->next) { |
| 1535 | struct rpcrdma_rep *rep; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1536 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1537 | rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr); |
| 1538 | rpcrdma_recv_buffer_put(rep); |
| 1539 | --count; |
| 1540 | } |
| 1541 | } |
| 1542 | buf->rb_posted_receives += count; |
| 1543 | trace_xprtrdma_post_recvs(r_xprt, count, rc); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1544 | } |
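/* Note on sizing: "needed" covers the current credit grant plus twice
 * the backchannel depth, so enough Receives are posted to catch every
 * expected Reply as well as incoming backchannel Calls.
 */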