// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver), possibly in an
 * interrupt context.
 */
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 113 | static void |
Chuck Lever | f9521d5 | 2018-10-01 14:26:13 -0400 | [diff] [blame] | 114 | rpcrdma_qp_event_handler(struct ib_event *event, void *context) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 115 | { |
| 116 | struct rpcrdma_ep *ep = context; |
Chuck Lever | 643cf32 | 2017-12-20 16:31:45 -0500 | [diff] [blame] | 117 | struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt, |
| 118 | rx_ep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 119 | |
	trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

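	/* Replenish the Receive Queue before handing this Reply
	 * to the reply handler.
	 */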
	rpcrdma_post_recvs(r_xprt, false);
	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_recv_buffer_put(rep);
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

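	/* Clamp our inline thresholds to what the peer advertises;
	 * never assume the peer accepts more than it announced.
	 */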
	if (rsize < r_xprt->rx_ep.rep_inline_recv)
		r_xprt->rx_ep.rep_inline_recv = rsize;
	if (wsize < r_xprt->rx_ep.rep_inline_send)
		r_xprt->rx_ep.rep_inline_send = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n", __func__,
		r_xprt->rx_ep.rep_inline_send,
		r_xprt->rx_ep.rep_inline_recv);
	rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_id->device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_id->device->name, rdma_event_msg(event->event));
	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia->ri_id->device))
			break;
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	ep->rep_max_requests = xprt_rdma_slot_table_entries;
	ep->rep_inline_send = xprt_rdma_max_inline_write;
	ep->rep_inline_recv = xprt_rdma_max_inline_read;

	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

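	/* Send completions are batched: with sq_sig_type set to
	 * IB_SIGNAL_REQ_WR above, the send path (not shown here)
	 * signals roughly one Send per rep_send_batch Sends posted,
	 * keeping completion overhead low.
	 */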
	ep->rep_send_batch = ep->rep_max_requests >> 3;
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

	sendcq = ib_alloc_cq(ia->ri_id->device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0,
			     IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_id->device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers are needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(r_xprt);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(r_xprt);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_id->device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc)
		goto out;

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
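/* An illustrative sketch of the index invariants: the consumer
 * advances rb_sc_head as free sendctxs are taken, and the producer
 * advances rb_sc_tail as completed ones are returned, so
 *
 *	rb_sc_head == rb_sc_tail	all sendctxs are free
 *	next(rb_sc_head) == rb_sc_tail	no free sendctxs remain
 *
 * As usual for a circular buffer, one slot stays unused so that the
 * two states remain distinguishable.
 */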
| 793 | |
| 794 | /* rpcrdma_sendctxs_destroy() assumes caller has already quiesced |
Chuck Lever | b8fe677 | 2019-04-24 09:40:36 -0400 | [diff] [blame] | 795 | * queue activity, and rpcrdma_xprt_drain has flushed all remaining |
| 796 | * Send requests. |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 797 | */ |
| 798 | static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf) |
| 799 | { |
| 800 | unsigned long i; |
| 801 | |
| 802 | for (i = 0; i <= buf->rb_sc_last; i++) |
| 803 | kfree(buf->rb_sc_ctxs[i]); |
| 804 | kfree(buf->rb_sc_ctxs); |
| 805 | } |
| 806 | |
| 807 | static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia) |
| 808 | { |
| 809 | struct rpcrdma_sendctx *sc; |
| 810 | |
Gustavo A. R. Silva | 66d4218 | 2019-01-30 18:46:22 -0600 | [diff] [blame] | 811 | sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges), |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 812 | GFP_KERNEL); |
| 813 | if (!sc) |
| 814 | return NULL; |
| 815 | |
| 816 | sc->sc_wr.wr_cqe = &sc->sc_cqe; |
| 817 | sc->sc_wr.sg_list = sc->sc_sges; |
| 818 | sc->sc_wr.opcode = IB_WR_SEND; |
| 819 | sc->sc_cqe.done = rpcrdma_wc_send; |
| 820 | return sc; |
| 821 | } |
| 822 | |
| 823 | static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) |
| 824 | { |
| 825 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 826 | struct rpcrdma_sendctx *sc; |
| 827 | unsigned long i; |
| 828 | |
| 829 | /* Maximum number of concurrent outstanding Send WRs. Capping |
| 830 | * the circular queue size stops Send Queue overflow by causing |
| 831 | * the ->send_request call to fail temporarily before too many |
| 832 | * Sends are posted. |
| 833 | */ |
| 834 | i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; |
| 835 | dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i); |
| 836 | buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); |
| 837 | if (!buf->rb_sc_ctxs) |
| 838 | return -ENOMEM; |
| 839 | |
| 840 | buf->rb_sc_last = i - 1; |
| 841 | for (i = 0; i <= buf->rb_sc_last; i++) { |
| 842 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); |
| 843 | if (!sc) |
Dan Carpenter | 6e17f58 | 2019-01-05 16:06:48 +0300 | [diff] [blame] | 844 | return -ENOMEM; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 845 | |
| 846 | sc->sc_xprt = r_xprt; |
| 847 | buf->rb_sc_ctxs[i] = sc; |
| 848 | } |
| 849 | |
| 850 | return 0; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 851 | } |
| 852 | |
| 853 | /* The sendctx queue is not guaranteed to have a size that is a |
| 854 | * power of two, thus the helpers in circ_buf.h cannot be used. |
| 855 | * The other option is to use modulus (%), which can be expensive. |
| 856 | */ |
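/* (For reference, the helper below computes "(item + 1) % (buf->rb_sc_last + 1)"
 * without the division.)
 */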
| 857 | static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf, |
| 858 | unsigned long item) |
| 859 | { |
| 860 | return likely(item < buf->rb_sc_last) ? item + 1 : 0; |
| 861 | } |
| 862 | |
| 863 | /** |
| 864 | * rpcrdma_sendctx_get_locked - Acquire a send context |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 865 | * @r_xprt: controlling transport instance |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 866 | * |
| 867 | * Returns pointer to a free send completion context; or NULL if |
| 868 | * the queue is empty. |
| 869 | * |
| 870 | * Usage: Called to acquire an SGE array before preparing a Send WR. |
| 871 | * |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 872 | * The caller serializes calls to this function (per transport), and |
| 873 | * provides an effective memory barrier that flushes the new value |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 874 | * of rb_sc_head. |
| 875 | */ |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 876 | struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 877 | { |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 878 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 879 | struct rpcrdma_sendctx *sc; |
| 880 | unsigned long next_head; |
| 881 | |
| 882 | next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); |
| 883 | |
| 884 | if (next_head == READ_ONCE(buf->rb_sc_tail)) |
| 885 | goto out_emptyq; |
| 886 | |
| 887 | /* ORDER: item must be accessed _before_ head is updated */ |
| 888 | sc = buf->rb_sc_ctxs[next_head]; |
| 889 | |
| 890 | /* Releasing the lock in the caller acts as a memory |
| 891 | * barrier that flushes rb_sc_head. |
| 892 | */ |
| 893 | buf->rb_sc_head = next_head; |
| 894 | |
| 895 | return sc; |
| 896 | |
| 897 | out_emptyq: |
| 898 | /* The queue is "empty" if there have not been enough Send |
| 899 | * completions recently. This is a sign the Send Queue is |
| 900 | * backing up. Cause the caller to pause and try again. |
| 901 | */ |
Chuck Lever | 05eb06d | 2019-06-19 10:32:48 -0400 | [diff] [blame] | 902 | xprt_wait_for_buffer_space(&r_xprt->rx_xprt); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 903 | r_xprt->rx_stats.empty_sendctx_q++; |
| 904 | return NULL; |
| 905 | } |
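/* Sketch of the intended pairing (illustrative only; the real call
 * sites live in the marshaling code and the Send completion handler):
 *
 *	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
 *	if (!req->rl_sendctx)
 *		... back off; ->send_request will be retried later ...
 *	... build sc_sges and post the Send WR ...
 *	... from the Send completion path, once the WR completes:
 *	rpcrdma_sendctx_put_locked(sc);
 */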
| 906 | |
| 907 | /** |
| 908 | * rpcrdma_sendctx_put_locked - Release a send context |
| 909 | * @sc: send context to release |
| 910 | * |
| 911 |  * Usage: Called from Send completion to return a sendctx
| 912 | * to the queue. |
| 913 | * |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 914 | * The caller serializes calls to this function (per transport). |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 915 | */ |
Chuck Lever | efd81e9 | 2018-05-04 15:35:41 -0400 | [diff] [blame] | 916 | static void |
| 917 | rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc) |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 918 | { |
| 919 | struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf; |
| 920 | unsigned long next_tail; |
| 921 | |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 922 | /* Unmap SGEs of previously completed but unsignaled |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 923 | * Sends by walking up the queue until @sc is found. |
| 924 | */ |
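	/* (Send Queue completions are delivered in order, so @sc's
	 * completion implies the earlier, unsignaled Sends have also
	 * finished and their SGEs are safe to unmap.)
	 */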
| 925 | next_tail = buf->rb_sc_tail; |
| 926 | do { |
| 927 | next_tail = rpcrdma_sendctx_next(buf, next_tail); |
| 928 | |
| 929 | /* ORDER: item must be accessed _before_ tail is updated */ |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 930 | rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 931 | |
| 932 | } while (buf->rb_sc_ctxs[next_tail] != sc); |
| 933 | |
| 934 | 	/* Paired with the READ_ONCE of rb_sc_tail in rpcrdma_sendctx_get_locked */
| 935 | smp_store_release(&buf->rb_sc_tail, next_tail); |
Chuck Lever | 2fad659 | 2018-05-04 15:35:57 -0400 | [diff] [blame] | 936 | |
Chuck Lever | 05eb06d | 2019-06-19 10:32:48 -0400 | [diff] [blame] | 937 | xprt_write_space(&sc->sc_xprt->rx_xprt); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 938 | } |
| 939 | |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 940 | static void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 941 | rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 942 | { |
| 943 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 944 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
| 945 | unsigned int count; |
| 946 | LIST_HEAD(free); |
| 947 | LIST_HEAD(all); |
| 948 | |
Chuck Lever | c421ece | 2018-10-01 14:25:20 -0400 | [diff] [blame] | 949 | for (count = 0; count < ia->ri_max_segs; count++) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 950 | struct rpcrdma_mr *mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 951 | int rc; |
| 952 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 953 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
| 954 | if (!mr) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 955 | break; |
| 956 | |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 957 | rc = frwr_init_mr(ia, mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 958 | if (rc) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 959 | kfree(mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 960 | break; |
| 961 | } |
| 962 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 963 | mr->mr_xprt = r_xprt; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 964 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 965 | list_add(&mr->mr_list, &free); |
| 966 | list_add(&mr->mr_all, &all); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 967 | } |
| 968 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 969 | spin_lock(&buf->rb_mrlock); |
| 970 | list_splice(&free, &buf->rb_mrs); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 971 | list_splice(&all, &buf->rb_all); |
| 972 | r_xprt->rx_stats.mrs_allocated += count; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 973 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | 1c443eff | 2017-12-20 16:31:21 -0500 | [diff] [blame] | 974 | trace_xprtrdma_createmrs(r_xprt, count); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 975 | } |
| 976 | |
| 977 | static void |
| 978 | rpcrdma_mr_refresh_worker(struct work_struct *work) |
| 979 | { |
| 980 | struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer, |
| 981 | rb_refresh_worker.work); |
| 982 | struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, |
| 983 | rx_buf); |
| 984 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 985 | rpcrdma_mrs_create(r_xprt); |
Chuck Lever | 05eb06d | 2019-06-19 10:32:48 -0400 | [diff] [blame] | 986 | xprt_write_space(&r_xprt->rx_xprt); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 987 | } |
| 988 | |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 989 | /** |
| 990 | * rpcrdma_req_create - Allocate an rpcrdma_req object |
| 991 | * @r_xprt: controlling r_xprt |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 992 | * @size: initial size, in bytes, of send and receive buffers |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 993 | * @flags: GFP flags passed to memory allocators |
| 994 | * |
| 995 | * Returns an allocated and fully initialized rpcrdma_req or NULL. |
| 996 | */ |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 997 | struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, |
| 998 | gfp_t flags) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 999 | { |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1000 | struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1001 | struct rpcrdma_regbuf *rb; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1002 | struct rpcrdma_req *req; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1003 | |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1004 | req = kzalloc(sizeof(*req), flags); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1005 | if (req == NULL) |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1006 | goto out1; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1007 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1008 | rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1009 | if (!rb) |
| 1010 | goto out2; |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1011 | req->rl_rdmabuf = rb; |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1012 | xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb)); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1013 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1014 | req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1015 | if (!req->rl_sendbuf) |
| 1016 | goto out3; |
| 1017 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1018 | req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1019 | if (!req->rl_recvbuf) |
| 1020 | goto out4; |
| 1021 | |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1022 | INIT_LIST_HEAD(&req->rl_registered); |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1023 | spin_lock(&buffer->rb_lock); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1024 | list_add(&req->rl_all, &buffer->rb_allreqs); |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1025 | spin_unlock(&buffer->rb_lock); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1026 | return req; |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1027 | |
| 1028 | out4: |
| 1029 | kfree(req->rl_sendbuf); |
| 1030 | out3: |
| 1031 | kfree(req->rl_rdmabuf); |
| 1032 | out2: |
| 1033 | kfree(req); |
| 1034 | out1: |
| 1035 | return NULL; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1036 | } |
| 1037 | |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1038 | static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, |
| 1039 | bool temp) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1040 | { |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1041 | struct rpcrdma_rep *rep; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1042 | |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1043 | rep = kzalloc(sizeof(*rep), GFP_KERNEL); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1044 | if (rep == NULL) |
| 1045 | goto out; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1046 | |
Chuck Lever | 94087e9 | 2019-04-24 09:40:20 -0400 | [diff] [blame] | 1047 | rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv, |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1048 | DMA_FROM_DEVICE, GFP_KERNEL); |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1049 | if (!rep->rr_rdmabuf) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1050 | goto out_free; |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1051 | |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1052 | xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1053 | rdmab_length(rep->rr_rdmabuf)); |
Chuck Lever | 1519e96 | 2016-09-15 10:57:49 -0400 | [diff] [blame] | 1054 | rep->rr_cqe.done = rpcrdma_wc_receive; |
Chuck Lever | fed171b | 2015-05-26 11:51:37 -0400 | [diff] [blame] | 1055 | rep->rr_rxprt = r_xprt; |
Chuck Lever | 6ea8e71 | 2016-09-15 10:56:51 -0400 | [diff] [blame] | 1056 | rep->rr_recv_wr.next = NULL; |
| 1057 | rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; |
| 1058 | rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; |
| 1059 | rep->rr_recv_wr.num_sge = 1; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1060 | rep->rr_temp = temp; |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1061 | return rep; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1062 | |
| 1063 | out_free: |
| 1064 | kfree(rep); |
| 1065 | out: |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1066 | return NULL; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1067 | } |
| 1068 | |
Chuck Lever | 86c4ccd | 2019-04-24 09:40:25 -0400 | [diff] [blame] | 1069 | /** |
| 1070 | * rpcrdma_buffer_create - Create initial set of req/rep objects |
| 1071 | * @r_xprt: transport instance to (re)initialize |
| 1072 | * |
| 1073 | * Returns zero on success, otherwise a negative errno. |
| 1074 | */ |
| 1075 | int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1076 | { |
Chuck Lever | ac920d0 | 2015-01-21 11:03:44 -0500 | [diff] [blame] | 1077 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1078 | int i, rc; |
| 1079 | |
Chuck Lever | 86c4ccd | 2019-04-24 09:40:25 -0400 | [diff] [blame] | 1080 | buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1081 | buf->rb_bc_srv_max_requests = 0; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1082 | spin_lock_init(&buf->rb_mrlock); |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 1083 | spin_lock_init(&buf->rb_lock); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1084 | INIT_LIST_HEAD(&buf->rb_mrs); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1085 | INIT_LIST_HEAD(&buf->rb_all); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1086 | INIT_DELAYED_WORK(&buf->rb_refresh_worker, |
| 1087 | rpcrdma_mr_refresh_worker); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1088 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1089 | rpcrdma_mrs_create(r_xprt); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1090 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1091 | INIT_LIST_HEAD(&buf->rb_send_bufs); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1092 | INIT_LIST_HEAD(&buf->rb_allreqs); |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1093 | |
| 1094 | rc = -ENOMEM; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1095 | for (i = 0; i < buf->rb_max_requests; i++) { |
| 1096 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1097 | |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1098 | req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE, |
| 1099 | GFP_KERNEL); |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1100 | if (!req) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1101 | goto out; |
Chuck Lever | a80d66c | 2017-06-08 11:52:12 -0400 | [diff] [blame] | 1102 | list_add(&req->rl_list, &buf->rb_send_bufs); |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1103 | } |
| 1104 | |
Chuck Lever | 8d4fb8f | 2018-07-28 10:46:47 -0400 | [diff] [blame] | 1105 | buf->rb_credits = 1; |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1106 | INIT_LIST_HEAD(&buf->rb_recv_bufs); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1107 | |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1108 | rc = rpcrdma_sendctxs_create(r_xprt); |
| 1109 | if (rc) |
| 1110 | goto out; |
| 1111 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1112 | return 0; |
| 1113 | out: |
| 1114 | rpcrdma_buffer_destroy(buf); |
| 1115 | return rc; |
| 1116 | } |
| 1117 | |
Chuck Lever | 2314650 | 2019-04-24 09:39:11 -0400 | [diff] [blame] | 1118 | static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1119 | { |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1120 | rpcrdma_regbuf_free(rep->rr_rdmabuf); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1121 | kfree(rep); |
| 1122 | } |
| 1123 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1124 | /** |
| 1125 | * rpcrdma_req_destroy - Destroy an rpcrdma_req object |
| 1126 | * @req: unused object to be destroyed |
| 1127 | * |
| 1128 | * This function assumes that the caller prevents concurrent device |
| 1129 | * unload and transport tear-down. |
| 1130 | */ |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1131 | void |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1132 | rpcrdma_req_destroy(struct rpcrdma_req *req) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1133 | { |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1134 | list_del(&req->rl_all); |
| 1135 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1136 | rpcrdma_regbuf_free(req->rl_recvbuf); |
| 1137 | rpcrdma_regbuf_free(req->rl_sendbuf); |
| 1138 | rpcrdma_regbuf_free(req->rl_rdmabuf); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1139 | kfree(req); |
| 1140 | } |
| 1141 | |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1142 | static void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1143 | rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1144 | { |
| 1145 | struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, |
| 1146 | rx_buf); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1147 | struct rpcrdma_mr *mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1148 | unsigned int count; |
| 1149 | |
| 1150 | count = 0; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1151 | spin_lock(&buf->rb_mrlock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1152 | while (!list_empty(&buf->rb_all)) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1153 | mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all); |
| 1154 | list_del(&mr->mr_all); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1155 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1156 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | 054f155 | 2018-05-01 11:37:14 -0400 | [diff] [blame] | 1157 | |
| 1158 | 		/* Ensure MR is not on any rl_registered list */
| 1159 | if (!list_empty(&mr->mr_list)) |
| 1160 | list_del(&mr->mr_list); |
| 1161 | |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 1162 | frwr_release_mr(mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1163 | count++; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1164 | spin_lock(&buf->rb_mrlock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1165 | } |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1166 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1167 | r_xprt->rx_stats.mrs_allocated = 0; |
| 1168 | |
| 1169 | dprintk("RPC: %s: released %u MRs\n", __func__, count); |
| 1170 | } |
| 1171 | |
Chuck Lever | af65ed4 | 2018-12-19 11:00:37 -0500 | [diff] [blame] | 1172 | /** |
| 1173 | * rpcrdma_buffer_destroy - Release all hw resources |
| 1174 | * @buf: root control block for resources |
| 1175 | * |
Chuck Lever | b8fe677 | 2019-04-24 09:40:36 -0400 | [diff] [blame] | 1176 |  * ORDERING: relies on a prior rpcrdma_xprt_drain:
Chuck Lever | af65ed4 | 2018-12-19 11:00:37 -0500 | [diff] [blame] | 1177 | * - No more Send or Receive completions can occur |
| 1178 | * - All MRs, reps, and reqs are returned to their free lists |
| 1179 | */ |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1180 | void |
| 1181 | rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) |
| 1182 | { |
Chuck Lever | 9378b27 | 2017-04-11 13:22:29 -0400 | [diff] [blame] | 1183 | cancel_delayed_work_sync(&buf->rb_refresh_worker); |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 1184 | |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1185 | rpcrdma_sendctxs_destroy(buf); |
| 1186 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1187 | while (!list_empty(&buf->rb_recv_bufs)) { |
| 1188 | struct rpcrdma_rep *rep; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1189 | |
Chuck Lever | 9d95cd5 | 2018-05-04 15:35:36 -0400 | [diff] [blame] | 1190 | rep = list_first_entry(&buf->rb_recv_bufs, |
| 1191 | struct rpcrdma_rep, rr_list); |
| 1192 | list_del(&rep->rr_list); |
Chuck Lever | 2314650 | 2019-04-24 09:39:11 -0400 | [diff] [blame] | 1193 | rpcrdma_rep_destroy(rep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1194 | } |
| 1195 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1196 | while (!list_empty(&buf->rb_send_bufs)) { |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1197 | struct rpcrdma_req *req; |
Allen Andrews | 4034ba0 | 2014-05-28 10:32:09 -0400 | [diff] [blame] | 1198 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1199 | req = list_first_entry(&buf->rb_send_bufs, |
| 1200 | struct rpcrdma_req, rl_list); |
| 1201 | list_del(&req->rl_list); |
| 1202 | rpcrdma_req_destroy(req); |
Chuck Lever | 9f9d802 | 2014-07-29 17:24:45 -0400 | [diff] [blame] | 1203 | } |
| 1204 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1205 | rpcrdma_mrs_destroy(buf); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1206 | } |
| 1207 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1208 | /** |
| 1209 | * rpcrdma_mr_get - Allocate an rpcrdma_mr object |
| 1210 | * @r_xprt: controlling transport |
| 1211 | * |
| 1212 | * Returns an initialized rpcrdma_mr or NULL if no free |
| 1213 | * rpcrdma_mr objects are available. |
| 1214 | */ |
| 1215 | struct rpcrdma_mr * |
| 1216 | rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1217 | { |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1218 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1219 | struct rpcrdma_mr *mr = NULL; |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1220 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1221 | spin_lock(&buf->rb_mrlock); |
| 1222 | if (!list_empty(&buf->rb_mrs)) |
| 1223 | mr = rpcrdma_mr_pop(&buf->rb_mrs); |
| 1224 | spin_unlock(&buf->rb_mrlock); |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1225 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1226 | if (!mr) |
| 1227 | goto out_nomrs; |
| 1228 | return mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1229 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1230 | out_nomrs: |
Chuck Lever | 1c443eff | 2017-12-20 16:31:21 -0500 | [diff] [blame] | 1231 | trace_xprtrdma_nomrs(r_xprt); |
Chuck Lever | bebd031 | 2017-04-11 13:23:10 -0400 | [diff] [blame] | 1232 | if (r_xprt->rx_ep.rep_connected != -ENODEV) |
| 1233 | schedule_delayed_work(&buf->rb_refresh_worker, 0); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1234 | |
| 1235 | /* Allow the reply handler and refresh worker to run */ |
| 1236 | cond_resched(); |
| 1237 | |
| 1238 | return NULL; |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1239 | } |
| 1240 | |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1241 | static void |
| 1242 | __rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr) |
| 1243 | { |
| 1244 | spin_lock(&buf->rb_mrlock); |
| 1245 | rpcrdma_mr_push(mr, &buf->rb_mrs); |
| 1246 | spin_unlock(&buf->rb_mrlock); |
| 1247 | } |
| 1248 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1249 | /** |
| 1250 | * rpcrdma_mr_put - Release an rpcrdma_mr object |
| 1251 | * @mr: object to release |
| 1252 | * |
| 1253 | */ |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1254 | void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1255 | rpcrdma_mr_put(struct rpcrdma_mr *mr) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1256 | { |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1257 | __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr); |
| 1258 | } |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1259 | |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1260 | /** |
| 1261 | * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it |
| 1262 | * @mr: object to release |
| 1263 | * |
| 1264 | */ |
| 1265 | void |
| 1266 | rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr) |
| 1267 | { |
| 1268 | struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
| 1269 | |
Chuck Lever | e2f34e2 | 2018-12-19 10:58:13 -0500 | [diff] [blame] | 1270 | if (mr->mr_dir != DMA_NONE) { |
| 1271 | trace_xprtrdma_mr_unmap(mr); |
Chuck Lever | f19bd0b | 2019-04-24 09:40:04 -0400 | [diff] [blame] | 1272 | ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device, |
Chuck Lever | e2f34e2 | 2018-12-19 10:58:13 -0500 | [diff] [blame] | 1273 | mr->mr_sg, mr->mr_nents, mr->mr_dir); |
| 1274 | mr->mr_dir = DMA_NONE; |
| 1275 | } |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1276 | __rpcrdma_mr_put(&r_xprt->rx_buf, mr); |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1277 | } |
| 1278 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1279 | /** |
| 1280 | * rpcrdma_buffer_get - Get a request buffer |
| 1281 | * @buffers: Buffer pool from which to obtain a buffer |
Chuck Lever | 78d506e | 2016-09-06 11:22:49 -0400 | [diff] [blame] | 1282 | * |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1283 | * Returns a fresh rpcrdma_req, or NULL if none are available. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1284 | */ |
| 1285 | struct rpcrdma_req * |
| 1286 | rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) |
| 1287 | { |
| 1288 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1289 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1290 | spin_lock(&buffers->rb_lock); |
Chuck Lever | e68699c | 2018-05-04 15:35:31 -0400 | [diff] [blame] | 1291 | req = list_first_entry_or_null(&buffers->rb_send_bufs, |
| 1292 | struct rpcrdma_req, rl_list); |
| 1293 | if (req) |
| 1294 | list_del_init(&req->rl_list); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1295 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1296 | return req; |
| 1297 | } |
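/* Illustrative pairing with rpcrdma_buffer_put() below:
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 *	if (!req)
 *		... no free reqs; the caller waits for buffer space ...
 *	... use req for one RPC ...
 *	rpcrdma_buffer_put(&r_xprt->rx_buf, req);
 */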
| 1298 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1299 | /** |
| 1300 | * rpcrdma_buffer_put - Put request/reply buffers back into pool |
Chuck Lever | 5828ceb | 2019-06-19 10:33:36 -0400 | [diff] [blame^] | 1301 | * @buffers: buffer pool |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1302 | * @req: object to return |
| 1303 | * |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1304 | */ |
Chuck Lever | 5828ceb | 2019-06-19 10:33:36 -0400 | [diff] [blame^] | 1305 | void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1306 | { |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1307 | struct rpcrdma_rep *rep = req->rl_reply; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1308 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1309 | req->rl_reply = NULL; |
| 1310 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1311 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1312 | list_add(&req->rl_list, &buffers->rb_send_bufs); |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame] | 1313 | if (rep) { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1314 | if (!rep->rr_temp) { |
| 1315 | list_add(&rep->rr_list, &buffers->rb_recv_bufs); |
| 1316 | rep = NULL; |
| 1317 | } |
Chuck Lever | 05c9746 | 2016-09-06 11:22:58 -0400 | [diff] [blame] | 1318 | } |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1319 | spin_unlock(&buffers->rb_lock); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1320 | if (rep) |
Chuck Lever | 2314650 | 2019-04-24 09:39:11 -0400 | [diff] [blame] | 1321 | rpcrdma_rep_destroy(rep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1322 | } |
| 1323 | |
| 1324 | /* |
| 1325 |  * Put reply buffers back into the pool when they are not attached
Chuck Lever | b45ccfd | 2014-05-28 10:32:34 -0400 | [diff] [blame] | 1326 |  * to a request. This happens in error conditions.
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1327 | */ |
| 1328 | void |
| 1329 | rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) |
| 1330 | { |
Chuck Lever | fed171b | 2015-05-26 11:51:37 -0400 | [diff] [blame] | 1331 | struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1332 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1333 | if (!rep->rr_temp) { |
| 1334 | spin_lock(&buffers->rb_lock); |
| 1335 | list_add(&rep->rr_list, &buffers->rb_recv_bufs); |
| 1336 | spin_unlock(&buffers->rb_lock); |
| 1337 | } else { |
Chuck Lever | 2314650 | 2019-04-24 09:39:11 -0400 | [diff] [blame] | 1338 | rpcrdma_rep_destroy(rep); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1339 | } |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1340 | } |
| 1341 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1342 | /* Returns a pointer to an rpcrdma_regbuf object, or NULL.
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1343 | * |
| 1344 | * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1345 | * receiving the payload of RDMA RECV operations. During Long Calls |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 1346 | * or Replies they may be registered externally via frwr_map. |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1347 | */ |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1348 | static struct rpcrdma_regbuf * |
| 1349 | rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1350 | gfp_t flags) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1351 | { |
| 1352 | struct rpcrdma_regbuf *rb; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1353 | |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1354 | rb = kmalloc(sizeof(*rb), flags); |
| 1355 | if (!rb) |
| 1356 | return NULL; |
| 1357 | rb->rg_data = kmalloc(size, flags); |
| 1358 | if (!rb->rg_data) { |
| 1359 | kfree(rb); |
| 1360 | return NULL; |
| 1361 | } |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1362 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1363 | rb->rg_device = NULL; |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1364 | rb->rg_direction = direction; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1365 | rb->rg_iov.length = size; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1366 | return rb; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1367 | } |
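/* Illustrative regbuf lifecycle (a sketch, not an actual call site):
 *
 *	rb = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (!rb)
 *		return -ENOMEM;
 *	... DMA mapping is deferred until the buffer is first used:
 *	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
 *		... handle the mapping failure ...
 *	...
 *	rpcrdma_regbuf_free(rb);
 *
 * (rpcrdma_regbuf_free() unmaps the buffer before freeing it.)
 */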
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1368 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1369 | /** |
Chuck Lever | 0f665ce | 2019-04-24 09:39:27 -0400 | [diff] [blame] | 1370 | * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer |
| 1371 | * @rb: regbuf to reallocate |
| 1372 | * @size: size of buffer to be allocated, in bytes |
| 1373 | * @flags: GFP flags |
| 1374 | * |
| 1375 | * Returns true if reallocation was successful. If false is |
| 1376 | * returned, @rb is left untouched. |
| 1377 | */ |
| 1378 | bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags) |
| 1379 | { |
| 1380 | void *buf; |
| 1381 | |
| 1382 | buf = kmalloc(size, flags); |
| 1383 | if (!buf) |
| 1384 | return false; |
| 1385 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1386 | rpcrdma_regbuf_dma_unmap(rb); |
Chuck Lever | 0f665ce | 2019-04-24 09:39:27 -0400 | [diff] [blame] | 1387 | kfree(rb->rg_data); |
| 1388 | |
| 1389 | rb->rg_data = buf; |
| 1390 | rb->rg_iov.length = size; |
| 1391 | return true; |
| 1392 | } |
| 1393 | |
| 1394 | /** |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1395 | * __rpcrdma_regbuf_dma_map - DMA-map a regbuf |
| 1396 | * @r_xprt: controlling transport instance |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1397 | * @rb: regbuf to be mapped |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1398 | * |
| 1399 | * Returns true if the buffer is now DMA mapped to @r_xprt's device |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1400 | */ |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1401 | bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt, |
| 1402 | struct rpcrdma_regbuf *rb) |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1403 | { |
Chuck Lever | f19bd0b | 2019-04-24 09:40:04 -0400 | [diff] [blame] | 1404 | struct ib_device *device = r_xprt->rx_ia.ri_id->device; |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1405 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1406 | if (rb->rg_direction == DMA_NONE) |
| 1407 | return false; |
| 1408 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1409 | rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb), |
| 1410 | rdmab_length(rb), rb->rg_direction); |
Chuck Lever | 53b2c1c | 2018-12-19 11:00:06 -0500 | [diff] [blame] | 1411 | if (ib_dma_mapping_error(device, rdmab_addr(rb))) { |
| 1412 | trace_xprtrdma_dma_maperr(rdmab_addr(rb)); |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1413 | return false; |
Chuck Lever | 53b2c1c | 2018-12-19 11:00:06 -0500 | [diff] [blame] | 1414 | } |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1415 | |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1416 | rb->rg_device = device; |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1417 | rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1418 | return true; |
| 1419 | } |
| 1420 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1421 | static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb) |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1422 | { |
Chuck Lever | e89e8d8f | 2018-01-31 12:34:13 -0500 | [diff] [blame] | 1423 | if (!rb) |
| 1424 | return; |
| 1425 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1426 | if (!rpcrdma_regbuf_is_mapped(rb)) |
| 1427 | return; |
| 1428 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1429 | ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb), |
| 1430 | rb->rg_direction); |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1431 | rb->rg_device = NULL; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1432 | } |
| 1433 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1434 | static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1435 | { |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1436 | rpcrdma_regbuf_dma_unmap(rb); |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1437 | if (rb) |
| 1438 | kfree(rb->rg_data); |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1439 | kfree(rb); |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1440 | } |
| 1441 | |
Chuck Lever | 995d312 | 2018-12-19 11:00:32 -0500 | [diff] [blame] | 1442 | /** |
| 1443 | * rpcrdma_ep_post - Post WRs to a transport's Send Queue |
| 1444 | * @ia: transport's device information |
| 1445 | * @ep: transport's RDMA endpoint information |
| 1446 | * @req: rpcrdma_req containing the Send WR to post |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1447 | * |
Chuck Lever | 995d312 | 2018-12-19 11:00:32 -0500 | [diff] [blame] | 1448 | * Returns 0 if the post was successful, otherwise -ENOTCONN |
| 1449 | * is returned. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1450 | */ |
| 1451 | int |
| 1452 | rpcrdma_ep_post(struct rpcrdma_ia *ia, |
| 1453 | struct rpcrdma_ep *ep, |
| 1454 | struct rpcrdma_req *req) |
| 1455 | { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1456 | struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr; |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 1457 | int rc; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1458 | |
Chuck Lever | 0ab1152 | 2019-06-19 10:33:15 -0400 | [diff] [blame] | 1459 | if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1460 | send_wr->send_flags |= IB_SEND_SIGNALED; |
| 1461 | ep->rep_send_count = ep->rep_send_batch; |
| 1462 | } else { |
| 1463 | send_wr->send_flags &= ~IB_SEND_SIGNALED; |
| 1464 | --ep->rep_send_count; |
| 1465 | } |
Chuck Lever | 7a89f9c | 2016-06-29 13:53:43 -0400 | [diff] [blame] | 1466 | |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 1467 | rc = frwr_send(ia, req); |
Chuck Lever | ab03eff | 2017-12-20 16:30:40 -0500 | [diff] [blame] | 1468 | trace_xprtrdma_post_send(req, rc); |
| 1469 | if (rc) |
| 1470 | return -ENOTCONN; |
| 1471 | return 0; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1472 | } |
| 1473 | |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1474 | static void |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1475 | rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1476 | { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1477 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1478 | struct rpcrdma_ep *ep = &r_xprt->rx_ep; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1479 | struct ib_recv_wr *i, *wr, *bad_wr; |
| 1480 | struct rpcrdma_rep *rep; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1481 | int needed, count, rc; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1482 | |
Chuck Lever | 61c208a | 2018-10-01 14:26:35 -0400 | [diff] [blame] | 1483 | rc = 0; |
| 1484 | count = 0; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1485 | |
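	/* Provision enough Receives to cover the advertised credit
	 * limit plus expected backchannel traffic. Steady-state callers
	 * (!temp) request a further batch so that posting happens in
	 * larger, less frequent bursts.
	 */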
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1486 | needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1); |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1487 | if (ep->rep_receive_count > needed) |
Chuck Lever | 61c208a | 2018-10-01 14:26:35 -0400 | [diff] [blame] | 1488 | goto out; |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1489 | needed -= ep->rep_receive_count; |
Chuck Lever | e340c2d | 2019-02-11 11:23:54 -0500 | [diff] [blame] | 1490 | if (!temp) |
| 1491 | needed += RPCRDMA_MAX_RECV_BATCH; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1492 | |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1493 | /* fast path: all needed reps can be found on the free list */ |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1494 | wr = NULL; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1495 | spin_lock(&buf->rb_lock); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1496 | while (needed) { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1497 | rep = list_first_entry_or_null(&buf->rb_recv_bufs, |
| 1498 | struct rpcrdma_rep, rr_list); |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1499 | if (!rep) |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1500 | break; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1501 | |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1502 | list_del(&rep->rr_list); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1503 | rep->rr_recv_wr.next = wr; |
| 1504 | wr = &rep->rr_recv_wr; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1505 | --needed; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1506 | } |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1507 | spin_unlock(&buf->rb_lock); |
| 1508 | |
| 1509 | while (needed) { |
| 1510 | rep = rpcrdma_rep_create(r_xprt, temp); |
| 1511 | if (!rep) |
| 1512 | break; |
| 1513 | |
| 1514 | rep->rr_recv_wr.next = wr; |
| 1515 | wr = &rep->rr_recv_wr; |
| 1516 | --needed; |
| 1517 | } |
| 1518 | if (!wr) |
Chuck Lever | 61c208a | 2018-10-01 14:26:35 -0400 | [diff] [blame] | 1519 | goto out; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1520 | |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1521 | for (i = wr; i; i = i->next) { |
| 1522 | rep = container_of(i, struct rpcrdma_rep, rr_recv_wr); |
| 1523 | |
| 1524 | if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) |
| 1525 | goto release_wrs; |
| 1526 | |
| 1527 | trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe); |
| 1528 | ++count; |
| 1529 | } |
| 1530 | |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 1531 | rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, |
| 1532 | (const struct ib_recv_wr **)&bad_wr); |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1533 | out: |
| 1534 | trace_xprtrdma_post_recvs(r_xprt, count, rc); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1535 | if (rc) { |
Chuck Lever | 2d0abe3 | 2019-06-19 10:32:38 -0400 | [diff] [blame] | 1536 | for (wr = bad_wr; wr;) { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1537 | struct rpcrdma_rep *rep; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1538 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1539 | rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr); |
Chuck Lever | 2d0abe3 | 2019-06-19 10:32:38 -0400 | [diff] [blame] | 1540 | wr = wr->next; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1541 | rpcrdma_recv_buffer_put(rep); |
| 1542 | --count; |
| 1543 | } |
| 1544 | } |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1545 | ep->rep_receive_count += count; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1546 | return; |
| 1547 | |
| 1548 | release_wrs: |
| 1549 | for (i = wr; i;) { |
| 1550 | rep = container_of(i, struct rpcrdma_rep, rr_recv_wr); |
| 1551 | i = i->next; |
| 1552 | rpcrdma_recv_buffer_put(rep); |
| 1553 | } |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1554 | } |