// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_recv_buffer_put(rep);
}
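/* Update the transport's inline thresholds from the connection
 * parameters the server sent in its RDMA-CM private message, if
 * one was provided. A valid message can only lower rep_inline_send
 * and rep_inline_recv, never raise them.
 */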
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < r_xprt->rx_ep.rep_inline_recv)
		r_xprt->rx_ep.rep_inline_recv = rsize;
	if (wsize < r_xprt->rx_ep.rep_inline_send)
		r_xprt->rx_ep.rep_inline_send = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n", __func__,
		r_xprt->rx_ep.rep_inline_send,
		r_xprt->rx_ep.rep_inline_recv);
	rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_id->device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_id->device->name, rdma_event_msg(event->event));
	return 0;
}

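/* Create a new rdma_cm_id, then resolve the server's address and
 * route. Both resolution steps are asynchronous: each arms
 * ia->ri_done, which rpcrdma_cm_event_handler completes after
 * recording the result in ia->ri_async_rc, and this function waits
 * interruptibly with a timeout slightly longer than
 * RDMA_RESOLVE_TIMEOUT.
 */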
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia->ri_id->device))
			break;
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	rpcrdma_reps_destroy(buf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(r_xprt);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	ep->rep_max_requests = xprt_rdma_slot_table_entries;
	ep->rep_inline_send = xprt_rdma_max_inline_write;
	ep->rep_inline_recv = xprt_rdma_max_inline_read;

	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	ep->rep_send_batch = ep->rep_max_requests >> 3;
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

	sendcq = ib_alloc_cq_any(ia->ri_id->device, r_xprt,
				 ep->rep_attr.cap.max_send_wr + 1,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
				 ep->rep_attr.cap.max_recv_wr + 1,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers are needed.
 */
static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
				    struct ib_qp_init_attr *qp_init_attr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(r_xprt);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}
	return 0;

out3:
	rpcrdma_ep_destroy(r_xprt);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

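/* Re-establish a connection on the existing device: disconnect,
 * then create a fresh cm_id and QP. Because the new cm_id must
 * resolve to the same device, the transport's PD, MRs, and DMA
 * mappings remain usable (see the sanity check below).
 */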
static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_id->device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 687 | /* |
| 688 | * Connect unconnected endpoint. |
| 689 | */ |
| 690 | int |
| 691 | rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) |
| 692 | { |
Chuck Lever | 0a90487 | 2017-02-08 17:00:35 -0500 | [diff] [blame] | 693 | struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt, |
| 694 | rx_ia); |
Chuck Lever | 31e62d2 | 2018-10-01 14:26:08 -0400 | [diff] [blame] | 695 | struct rpc_xprt *xprt = &r_xprt->rx_xprt; |
Chuck Lever | 98ef77d | 2019-08-26 13:12:57 -0400 | [diff] [blame] | 696 | struct ib_qp_init_attr qp_init_attr; |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 697 | int rc; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 698 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 699 | retry: |
Chuck Lever | 98ef77d | 2019-08-26 13:12:57 -0400 | [diff] [blame] | 700 | memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr)); |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 701 | switch (ep->rep_connected) { |
| 702 | case 0: |
Chuck Lever | 98ef77d | 2019-08-26 13:12:57 -0400 | [diff] [blame] | 703 | rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr); |
Chuck Lever | ec62f40 | 2014-05-28 10:34:07 -0400 | [diff] [blame] | 704 | if (rc) { |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 705 | rc = -ENETUNREACH; |
| 706 | goto out_noupdate; |
Chuck Lever | ec62f40 | 2014-05-28 10:34:07 -0400 | [diff] [blame] | 707 | } |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 708 | break; |
Chuck Lever | a9b0e38 | 2017-04-11 13:23:26 -0400 | [diff] [blame] | 709 | case -ENODEV: |
Chuck Lever | 98ef77d | 2019-08-26 13:12:57 -0400 | [diff] [blame] | 710 | rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr); |
Chuck Lever | a9b0e38 | 2017-04-11 13:23:26 -0400 | [diff] [blame] | 711 | if (rc) |
| 712 | goto out_noupdate; |
| 713 | break; |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 714 | default: |
Chuck Lever | 98ef77d | 2019-08-26 13:12:57 -0400 | [diff] [blame] | 715 | rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr); |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 716 | if (rc) |
| 717 | goto out; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 718 | } |
| 719 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 720 | ep->rep_connected = 0; |
Chuck Lever | 31e62d2 | 2018-10-01 14:26:08 -0400 | [diff] [blame] | 721 | xprt_clear_connected(xprt); |
| 722 | |
Chuck Lever | eea63ca | 2019-10-09 13:07:32 -0400 | [diff] [blame] | 723 | rpcrdma_reset_cwnd(r_xprt); |
Chuck Lever | 8d4fb8f | 2018-07-28 10:46:47 -0400 | [diff] [blame] | 724 | rpcrdma_post_recvs(r_xprt, true); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 725 | |
| 726 | rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma); |
Chuck Lever | ddbb347 | 2018-12-19 10:59:39 -0500 | [diff] [blame] | 727 | if (rc) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 728 | goto out; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 729 | |
Chuck Lever | f9e1afe | 2019-08-26 13:12:51 -0400 | [diff] [blame] | 730 | if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) |
| 731 | xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 732 | wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 733 | if (ep->rep_connected <= 0) { |
Chuck Lever | 0a90487 | 2017-02-08 17:00:35 -0500 | [diff] [blame] | 734 | if (ep->rep_connected == -EAGAIN) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 735 | goto retry; |
| 736 | rc = ep->rep_connected; |
Chuck Lever | 0a90487 | 2017-02-08 17:00:35 -0500 | [diff] [blame] | 737 | goto out; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 738 | } |
| 739 | |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 740 | rpcrdma_mrs_create(r_xprt); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 741 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 742 | out: |
| 743 | if (rc) |
| 744 | ep->rep_connected = rc; |
Chuck Lever | 1890896 | 2017-04-11 13:23:18 -0400 | [diff] [blame] | 745 | |
| 746 | out_noupdate: |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 747 | return rc; |
| 748 | } |
| 749 | |
/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
	rpcrdma_reqs_reset(r_xprt);
	rpcrdma_mrs_destroy(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
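
/* A note on the lock-free design (an informal gloss, not part of the
 * original comment): with the consumers and producers serialized as
 * described above, the head index has exactly one writer and the tail
 * index has exactly one writer, which is why neither index needs a
 * lock to protect it.
 */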

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

| 805 | static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia) |
| 806 | { |
| 807 | struct rpcrdma_sendctx *sc; |
| 808 | |
Gustavo A. R. Silva | 66d4218 | 2019-01-30 18:46:22 -0600 | [diff] [blame] | 809 | sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges), |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 810 | GFP_KERNEL); |
| 811 | if (!sc) |
| 812 | return NULL; |
| 813 | |
| 814 | sc->sc_wr.wr_cqe = &sc->sc_cqe; |
| 815 | sc->sc_wr.sg_list = sc->sc_sges; |
| 816 | sc->sc_wr.opcode = IB_WR_SEND; |
| 817 | sc->sc_cqe.done = rpcrdma_wc_send; |
| 818 | return sc; |
| 819 | } |
| 820 | |
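/* struct_size(sc, sc_sges, n) above expands to
 * sizeof(*sc) + n * sizeof(sc->sc_sges[0]) with overflow checking,
 * the idiomatic way to size an allocation for a struct that ends in
 * a flexible array member. A stand-alone sketch of the pattern
 * (struct item and item_alloc are made-up names):
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct item {
	int count;
	int values[];			/* flexible array member */
};

static struct item *item_alloc(int n)
{
	/* the open-coded size would be
	 * sizeof(struct item) + n * sizeof(int), but struct_size()
	 * saturates at SIZE_MAX on overflow instead of wrapping */
	struct item *p = kzalloc(struct_size(p, values, n), GFP_KERNEL);

	if (p)
		p->count = n;
	return p;
}
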
| 821 | static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) |
| 822 | { |
| 823 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 824 | struct rpcrdma_sendctx *sc; |
| 825 | unsigned long i; |
| 826 | |
| 827 | /* Maximum number of concurrent outstanding Send WRs. Capping |
| 828 | * the circular queue size stops Send Queue overflow by causing |
| 829 | * the ->send_request call to fail temporarily before too many |
| 830 | * Sends are posted. |
| 831 | */ |
| 832 | i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; |
| 833 | dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i); |
| 834 | buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); |
| 835 | if (!buf->rb_sc_ctxs) |
| 836 | return -ENOMEM; |
| 837 | |
| 838 | buf->rb_sc_last = i - 1; |
| 839 | for (i = 0; i <= buf->rb_sc_last; i++) { |
| 840 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); |
| 841 | if (!sc) |
Dan Carpenter | 6e17f58 | 2019-01-05 16:06:48 +0300 | [diff] [blame] | 842 | return -ENOMEM; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 843 | |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 844 | buf->rb_sc_ctxs[i] = sc; |
| 845 | } |
| 846 | |
| 847 | return 0; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 848 | } |
| 849 | |
| 850 | /* The sendctx queue is not guaranteed to have a size that is a |
| 851 | * power of two, so the helpers in circ_buf.h cannot be used.
| 852 | * The remaining alternative, modulus (%), can be expensive.
| 853 | */ |
| 854 | static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf, |
| 855 | unsigned long item) |
| 856 | { |
| 857 | return likely(item < buf->rb_sc_last) ? item + 1 : 0; |
| 858 | } |
| 859 | |
| 860 | /** |
| 861 | * rpcrdma_sendctx_get_locked - Acquire a send context |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 862 | * @r_xprt: controlling transport instance |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 863 | * |
| 864 | * Returns pointer to a free send completion context; or NULL if |
| 865 | * the queue is empty. |
| 866 | * |
| 867 | * Usage: Called to acquire an SGE array before preparing a Send WR. |
| 868 | * |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 869 | * The caller serializes calls to this function (per transport), and |
| 870 | * provides an effective memory barrier that flushes the new value |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 871 | * of rb_sc_head. |
| 872 | */ |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 873 | struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 874 | { |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 875 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 876 | struct rpcrdma_sendctx *sc; |
| 877 | unsigned long next_head; |
| 878 | |
| 879 | next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); |
| 880 | |
| 881 | if (next_head == READ_ONCE(buf->rb_sc_tail)) |
| 882 | goto out_emptyq; |
| 883 | |
| 884 | /* ORDER: item must be accessed _before_ head is updated */ |
| 885 | sc = buf->rb_sc_ctxs[next_head]; |
| 886 | |
| 887 | /* Releasing the lock in the caller acts as a memory |
| 888 | * barrier that flushes rb_sc_head. |
| 889 | */ |
| 890 | buf->rb_sc_head = next_head; |
| 891 | |
| 892 | return sc; |
| 893 | |
| 894 | out_emptyq: |
| 895 | /* The queue is "empty" if there have not been enough Send |
| 896 | * completions recently. This is a sign the Send Queue is |
| 897 | * backing up. Cause the caller to pause and try again. |
| 898 | */ |
Chuck Lever | 05eb06d | 2019-06-19 10:32:48 -0400 | [diff] [blame] | 899 | xprt_wait_for_buffer_space(&r_xprt->rx_xprt); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 900 | r_xprt->rx_stats.empty_sendctx_q++; |
| 901 | return NULL; |
| 902 | } |
| 903 | |
| 904 | /** |
| 905 | * rpcrdma_sendctx_put_locked - Release a send context |
Chuck Lever | f995879 | 2019-10-17 14:31:18 -0400 | [diff] [blame^] | 906 | * @r_xprt: controlling transport instance |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 907 | * @sc: send context to release |
| 908 | * |
| 909 | * Usage: Called from Send completion to return a sendctx
| 910 | * to the queue. |
| 911 | * |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 912 | * The caller serializes calls to this function (per transport). |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 913 | */ |
Chuck Lever | f995879 | 2019-10-17 14:31:18 -0400 | [diff] [blame^] | 914 | static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt, |
| 915 | struct rpcrdma_sendctx *sc) |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 916 | { |
Chuck Lever | f995879 | 2019-10-17 14:31:18 -0400 | [diff] [blame^] | 917 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 918 | unsigned long next_tail; |
| 919 | |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 920 | /* Unmap SGEs of previously completed but unsignaled |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 921 | * Sends by walking up the queue until @sc is found. |
| 922 | */ |
| 923 | next_tail = buf->rb_sc_tail; |
| 924 | do { |
| 925 | next_tail = rpcrdma_sendctx_next(buf, next_tail); |
| 926 | |
| 927 | /* ORDER: item must be accessed _before_ tail is updated */ |
Chuck Lever | dbcc53a | 2019-04-24 09:39:53 -0400 | [diff] [blame] | 928 | rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 929 | |
| 930 | } while (buf->rb_sc_ctxs[next_tail] != sc); |
| 931 | |
| 932 | /* Paired with READ_ONCE */ |
| 933 | smp_store_release(&buf->rb_sc_tail, next_tail); |
Chuck Lever | 2fad659 | 2018-05-04 15:35:57 -0400 | [diff] [blame] | 934 | |
Chuck Lever | f995879 | 2019-10-17 14:31:18 -0400 | [diff] [blame^] | 935 | xprt_write_space(&r_xprt->rx_xprt); |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 936 | } |
| 937 | |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 938 | static void |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 939 | rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 940 | { |
| 941 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 942 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
| 943 | unsigned int count; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 944 | |
Chuck Lever | c421ece | 2018-10-01 14:25:20 -0400 | [diff] [blame] | 945 | for (count = 0; count < ia->ri_max_segs; count++) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 946 | struct rpcrdma_mr *mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 947 | int rc; |
| 948 | |
Chuck Lever | 805a1f6 | 2019-08-19 18:46:24 -0400 | [diff] [blame] | 949 | mr = kzalloc(sizeof(*mr), GFP_NOFS); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 950 | if (!mr) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 951 | break; |
| 952 | |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 953 | rc = frwr_init_mr(ia, mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 954 | if (rc) { |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 955 | kfree(mr); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 956 | break; |
| 957 | } |
| 958 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 959 | mr->mr_xprt = r_xprt; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 960 | |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 961 | spin_lock(&buf->rb_lock); |
Chuck Lever | c370078 | 2019-10-09 13:07:43 -0400 | [diff] [blame] | 962 | rpcrdma_mr_push(mr, &buf->rb_mrs); |
Chuck Lever | eed48a9 | 2019-08-19 18:42:31 -0400 | [diff] [blame] | 963 | list_add(&mr->mr_all, &buf->rb_all_mrs); |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 964 | spin_unlock(&buf->rb_lock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 965 | } |
| 966 | |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 967 | r_xprt->rx_stats.mrs_allocated += count; |
Chuck Lever | 1c443eff | 2017-12-20 16:31:21 -0500 | [diff] [blame] | 968 | trace_xprtrdma_createmrs(r_xprt, count); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 969 | } |
| 970 | |
| 971 | static void |
| 972 | rpcrdma_mr_refresh_worker(struct work_struct *work) |
| 973 | { |
| 974 | struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer, |
Chuck Lever | 3b39f52 | 2019-08-19 18:45:37 -0400 | [diff] [blame] | 975 | rb_refresh_worker); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 976 | struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, |
| 977 | rx_buf); |
| 978 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 979 | rpcrdma_mrs_create(r_xprt); |
Chuck Lever | 05eb06d | 2019-06-19 10:32:48 -0400 | [diff] [blame] | 980 | xprt_write_space(&r_xprt->rx_xprt); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 981 | } |
| 982 | |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 983 | /** |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 984 | * rpcrdma_mrs_refresh - Wake the MR refresh worker |
| 985 | * @r_xprt: controlling transport instance |
| 986 | * |
| 987 | */ |
| 988 | void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt) |
| 989 | { |
| 990 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 991 | struct rpcrdma_ep *ep = &r_xprt->rx_ep; |
| 992 | |
| 993 | /* If there is no underlying device, there is no point
| 994 | * in waking the refresh worker.
| 995 | */ |
| 996 | if (ep->rep_connected != -ENODEV) { |
| 997 | /* The work is scheduled on a WQ_MEM_RECLAIM |
| 998 | * workqueue in order to prevent MR allocation |
| 999 | * from recursing into NFS during direct reclaim. |
| 1000 | */ |
| 1001 | queue_work(xprtiod_workqueue, &buf->rb_refresh_worker); |
| 1002 | } |
| 1003 | } |
| 1004 | |
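/* Sketch of how a WQ_MEM_RECLAIM workqueue like xprtiod_workqueue is
 * created (example_wq and example_init are made-up names). The flag
 * guarantees a dedicated rescuer thread, so queued work can still
 * make forward progress while the system is in direct reclaim.
 */
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
	example_wq = alloc_workqueue("example_iod",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	return example_wq ? 0 : -ENOMEM;
}
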
| 1005 | /** |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1006 | * rpcrdma_req_create - Allocate an rpcrdma_req object |
| 1007 | * @r_xprt: controlling r_xprt |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1008 | * @size: initial size, in bytes, of send and receive buffers |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1009 | * @flags: GFP flags passed to memory allocators |
| 1010 | * |
| 1011 | * Returns an allocated and fully initialized rpcrdma_req or NULL. |
| 1012 | */ |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1013 | struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, |
| 1014 | gfp_t flags) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1015 | { |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1016 | struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1017 | struct rpcrdma_regbuf *rb; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1018 | struct rpcrdma_req *req; |
Chuck Lever | f3c66a2 | 2019-08-19 18:40:11 -0400 | [diff] [blame] | 1019 | size_t maxhdrsize; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1020 | |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1021 | req = kzalloc(sizeof(*req), flags); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1022 | if (req == NULL) |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1023 | goto out1; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1024 | |
Chuck Lever | f3c66a2 | 2019-08-19 18:40:11 -0400 | [diff] [blame] | 1025 | /* Compute maximum header buffer size in bytes */ |
| 1026 | maxhdrsize = rpcrdma_fixed_maxsz + 3 + |
| 1027 | r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz; |
| 1028 | maxhdrsize *= sizeof(__be32); |
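	/* Worked example, assuming the xprt_rdma.h values
	 * rpcrdma_fixed_maxsz = 4 and rpcrdma_readchunk_maxsz = 6:
	 * with ri_max_segs = 8,
	 *
	 *	maxhdrsize = (4 + 3 + 8 * 6) * sizeof(__be32)
	 *		   = 55 * 4 = 220 bytes,
	 *
	 * and __roundup_pow_of_two(220) = 256, so a 256-byte header
	 * buffer is allocated below.
	 */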
| 1029 | rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize), |
| 1030 | DMA_TO_DEVICE, flags); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1031 | if (!rb) |
| 1032 | goto out2; |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1033 | req->rl_rdmabuf = rb; |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1034 | xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb)); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1035 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1036 | req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1037 | if (!req->rl_sendbuf) |
| 1038 | goto out3; |
| 1039 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1040 | req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags); |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1041 | if (!req->rl_recvbuf) |
| 1042 | goto out4; |
| 1043 | |
Chuck Lever | 6dc6ec9 | 2019-08-19 18:47:10 -0400 | [diff] [blame] | 1044 | INIT_LIST_HEAD(&req->rl_free_mrs); |
Chuck Lever | 2dd4a01 | 2018-02-28 15:31:05 -0500 | [diff] [blame] | 1045 | INIT_LIST_HEAD(&req->rl_registered); |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1046 | spin_lock(&buffer->rb_lock); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1047 | list_add(&req->rl_all, &buffer->rb_allreqs); |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1048 | spin_unlock(&buffer->rb_lock); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1049 | return req; |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1050 | |
| 1051 | out4: |
| 1052 | kfree(req->rl_sendbuf); |
| 1053 | out3: |
| 1054 | kfree(req->rl_rdmabuf); |
| 1055 | out2: |
| 1056 | kfree(req); |
| 1057 | out1: |
| 1058 | return NULL; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1059 | } |
| 1060 | |
Chuck Lever | a31b2f9 | 2019-10-09 13:07:27 -0400 | [diff] [blame] | 1061 | /** |
| 1062 | * rpcrdma_reqs_reset - Reset all reqs owned by a transport |
| 1063 | * @r_xprt: controlling transport instance |
| 1064 | * |
| 1065 | * ASSUMPTION: the rb_allreqs list is stable for the duration, |
| 1066 | * and thus can be walked without holding rb_lock. For example, the
| 1067 | * caller holds the transport send lock to exclude
| 1068 | * device removal or disconnection. |
| 1069 | */ |
| 1070 | static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt) |
| 1071 | { |
| 1072 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 1073 | struct rpcrdma_req *req; |
| 1074 | |
| 1075 | list_for_each_entry(req, &buf->rb_allreqs, rl_all) { |
| 1076 | /* Credits are valid only for one connection */ |
| 1077 | req->rl_slot.rq_cong = 0; |
| 1078 | } |
| 1079 | } |
| 1080 | |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1081 | static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, |
| 1082 | bool temp) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1083 | { |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1084 | struct rpcrdma_rep *rep; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1085 | |
Chuck Lever | 6b1184c | 2015-01-21 11:04:25 -0500 | [diff] [blame] | 1086 | rep = kzalloc(sizeof(*rep), GFP_KERNEL); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1087 | if (rep == NULL) |
| 1088 | goto out; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1089 | |
Chuck Lever | 94087e9 | 2019-04-24 09:40:20 -0400 | [diff] [blame] | 1090 | rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv, |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1091 | DMA_FROM_DEVICE, GFP_KERNEL); |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1092 | if (!rep->rr_rdmabuf) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1093 | goto out_free; |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1094 | |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1095 | xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1096 | rdmab_length(rep->rr_rdmabuf)); |
Chuck Lever | 1519e96 | 2016-09-15 10:57:49 -0400 | [diff] [blame] | 1097 | rep->rr_cqe.done = rpcrdma_wc_receive; |
Chuck Lever | fed171b | 2015-05-26 11:51:37 -0400 | [diff] [blame] | 1098 | rep->rr_rxprt = r_xprt; |
Chuck Lever | 6ea8e71 | 2016-09-15 10:56:51 -0400 | [diff] [blame] | 1099 | rep->rr_recv_wr.next = NULL; |
| 1100 | rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; |
| 1101 | rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; |
| 1102 | rep->rr_recv_wr.num_sge = 1; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1103 | rep->rr_temp = temp; |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1104 | return rep; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1105 | |
| 1106 | out_free: |
| 1107 | kfree(rep); |
| 1108 | out: |
Chuck Lever | 379d1bc | 2019-06-19 10:33:20 -0400 | [diff] [blame] | 1109 | return NULL; |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1110 | } |
| 1111 | |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1112 | static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep) |
| 1113 | { |
| 1114 | rpcrdma_regbuf_free(rep->rr_rdmabuf); |
| 1115 | kfree(rep); |
| 1116 | } |
| 1117 | |
| 1118 | static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf) |
| 1119 | { |
| 1120 | struct llist_node *node; |
| 1121 | |
| 1122 | /* Calls to llist_del_first are required to be serialized */ |
| 1123 | node = llist_del_first(&buf->rb_free_reps); |
| 1124 | if (!node) |
| 1125 | return NULL; |
| 1126 | return llist_entry(node, struct rpcrdma_rep, rr_node); |
| 1127 | } |
| 1128 | |
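/* Sketch of the lock-less free-list pattern used above (struct widget,
 * widget_get, and widget_put are made-up names): llist_add() may be
 * called concurrently from any context, but llist_del_first() callers
 * must serialize among themselves, which is why
 * rpcrdma_rep_get_locked() depends on its callers holding a lock.
 */
#include <linux/llist.h>

struct widget {
	struct llist_node w_node;
};

static LLIST_HEAD(free_widgets);

/* Any context may return a widget concurrently. */
static void widget_put(struct widget *w)
{
	llist_add(&w->w_node, &free_widgets);
}

/* Callers must serialize calls to widget_get(). */
static struct widget *widget_get(void)
{
	struct llist_node *node;

	node = llist_del_first(&free_widgets);
	return node ? llist_entry(node, struct widget, w_node) : NULL;
}
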
| 1129 | static void rpcrdma_rep_put(struct rpcrdma_buffer *buf, |
| 1130 | struct rpcrdma_rep *rep) |
| 1131 | { |
| 1132 | if (!rep->rr_temp) |
| 1133 | llist_add(&rep->rr_node, &buf->rb_free_reps); |
| 1134 | else |
| 1135 | rpcrdma_rep_destroy(rep); |
| 1136 | } |
| 1137 | |
| 1138 | static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf) |
| 1139 | { |
| 1140 | struct rpcrdma_rep *rep; |
| 1141 | |
| 1142 | while ((rep = rpcrdma_rep_get_locked(buf)) != NULL) |
| 1143 | rpcrdma_rep_destroy(rep); |
| 1144 | } |
| 1145 | |
Chuck Lever | 86c4ccd | 2019-04-24 09:40:25 -0400 | [diff] [blame] | 1146 | /** |
| 1147 | * rpcrdma_buffer_create - Create initial set of req/rep objects |
| 1148 | * @r_xprt: transport instance to (re)initialize |
| 1149 | * |
| 1150 | * Returns zero on success, otherwise a negative errno. |
| 1151 | */ |
| 1152 | int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1153 | { |
Chuck Lever | ac920d0 | 2015-01-21 11:03:44 -0500 | [diff] [blame] | 1154 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1155 | int i, rc; |
| 1156 | |
Chuck Lever | 86c4ccd | 2019-04-24 09:40:25 -0400 | [diff] [blame] | 1157 | buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1158 | buf->rb_bc_srv_max_requests = 0; |
Chuck Lever | 505bbe6 | 2016-06-29 13:52:54 -0400 | [diff] [blame] | 1159 | spin_lock_init(&buf->rb_lock); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1160 | INIT_LIST_HEAD(&buf->rb_mrs); |
Chuck Lever | eed48a9 | 2019-08-19 18:42:31 -0400 | [diff] [blame] | 1161 | INIT_LIST_HEAD(&buf->rb_all_mrs); |
Chuck Lever | 3b39f52 | 2019-08-19 18:45:37 -0400 | [diff] [blame] | 1162 | INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1163 | |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1164 | INIT_LIST_HEAD(&buf->rb_send_bufs); |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1165 | INIT_LIST_HEAD(&buf->rb_allreqs); |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1166 | |
| 1167 | rc = -ENOMEM; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1168 | for (i = 0; i < buf->rb_max_requests; i++) { |
| 1169 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1170 | |
Chuck Lever | bb93a1a | 2019-04-24 09:39:21 -0400 | [diff] [blame] | 1171 | req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE, |
| 1172 | GFP_KERNEL); |
Chuck Lever | 1769e6a | 2019-04-24 09:39:05 -0400 | [diff] [blame] | 1173 | if (!req) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1174 | goto out; |
Chuck Lever | a80d66c | 2017-06-08 11:52:12 -0400 | [diff] [blame] | 1175 | list_add(&req->rl_list, &buf->rb_send_bufs); |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1176 | } |
| 1177 | |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1178 | init_llist_head(&buf->rb_free_reps); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1179 | |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1180 | rc = rpcrdma_sendctxs_create(r_xprt); |
| 1181 | if (rc) |
| 1182 | goto out; |
| 1183 | |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1184 | return 0; |
| 1185 | out: |
| 1186 | rpcrdma_buffer_destroy(buf); |
| 1187 | return rc; |
| 1188 | } |
| 1189 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1190 | /** |
| 1191 | * rpcrdma_req_destroy - Destroy an rpcrdma_req object |
| 1192 | * @req: unused object to be destroyed |
| 1193 | * |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 1194 | * Relies on caller holding the transport send lock to protect |
| 1195 | * removing req->rl_all from buf->rb_allreqs safely.
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1196 | */ |
Chuck Lever | 6dc6ec9 | 2019-08-19 18:47:10 -0400 | [diff] [blame] | 1197 | void rpcrdma_req_destroy(struct rpcrdma_req *req) |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1198 | { |
Chuck Lever | c370078 | 2019-10-09 13:07:43 -0400 | [diff] [blame] | 1199 | struct rpcrdma_mr *mr; |
| 1200 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1201 | list_del(&req->rl_all); |
| 1202 | |
Chuck Lever | c370078 | 2019-10-09 13:07:43 -0400 | [diff] [blame] | 1203 | while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) { |
| 1204 | struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; |
| 1205 | |
| 1206 | spin_lock(&buf->rb_lock); |
| 1207 | list_del(&mr->mr_all); |
| 1208 | spin_unlock(&buf->rb_lock); |
| 1209 | |
| 1210 | frwr_release_mr(mr); |
| 1211 | } |
Chuck Lever | 6dc6ec9 | 2019-08-19 18:47:10 -0400 | [diff] [blame] | 1212 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1213 | rpcrdma_regbuf_free(req->rl_recvbuf); |
| 1214 | rpcrdma_regbuf_free(req->rl_sendbuf); |
| 1215 | rpcrdma_regbuf_free(req->rl_rdmabuf); |
Chuck Lever | 1392402 | 2015-01-21 11:03:52 -0500 | [diff] [blame] | 1216 | kfree(req); |
| 1217 | } |
| 1218 | |
Chuck Lever | c370078 | 2019-10-09 13:07:43 -0400 | [diff] [blame] | 1219 | /** |
| 1220 | * rpcrdma_mrs_destroy - Release all of a transport's MRs |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 1221 | * @r_xprt: controlling transport instance |
Chuck Lever | c370078 | 2019-10-09 13:07:43 -0400 | [diff] [blame] | 1222 | * |
| 1223 | * Relies on caller holding the transport send lock to protect |
| 1224 | * removing mr->mr_list from req->rl_free_mrs safely. |
| 1225 | */ |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 1226 | static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1227 | { |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 1228 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1229 | struct rpcrdma_mr *mr; |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1230 | |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 1231 | cancel_work_sync(&buf->rb_refresh_worker); |
| 1232 | |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 1233 | spin_lock(&buf->rb_lock); |
Chuck Lever | eed48a9 | 2019-08-19 18:42:31 -0400 | [diff] [blame] | 1234 | while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, |
| 1235 | struct rpcrdma_mr, |
| 1236 | mr_all)) != NULL) { |
Chuck Lever | c370078 | 2019-10-09 13:07:43 -0400 | [diff] [blame] | 1237 | list_del(&mr->mr_list); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1238 | list_del(&mr->mr_all); |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 1239 | spin_unlock(&buf->rb_lock); |
Chuck Lever | 054f155 | 2018-05-01 11:37:14 -0400 | [diff] [blame] | 1240 | |
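		/* Releasing an MR invokes ib_dereg_mr(), which can
		 * sleep, so rb_lock must be dropped around this call.
		 */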
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 1241 | frwr_release_mr(mr); |
Chuck Lever | 9d2da4f | 2019-10-09 13:07:48 -0400 | [diff] [blame] | 1242 | |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 1243 | spin_lock(&buf->rb_lock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1244 | } |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 1245 | spin_unlock(&buf->rb_lock); |
Chuck Lever | e2ac236 | 2016-06-29 13:54:00 -0400 | [diff] [blame] | 1246 | } |
| 1247 | |
Chuck Lever | af65ed4 | 2018-12-19 11:00:37 -0500 | [diff] [blame] | 1248 | /** |
| 1249 | * rpcrdma_buffer_destroy - Release all hw resources |
| 1250 | * @buf: root control block for resources |
| 1251 | * |
Chuck Lever | b8fe677 | 2019-04-24 09:40:36 -0400 | [diff] [blame] | 1252 | * ORDERING: relies on a prior rpcrdma_xprt_drain:
Chuck Lever | af65ed4 | 2018-12-19 11:00:37 -0500 | [diff] [blame] | 1253 | * - No more Send or Receive completions can occur |
| 1254 | * - All MRs, reps, and reqs are returned to their free lists |
| 1255 | */ |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1256 | void |
| 1257 | rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) |
| 1258 | { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1259 | rpcrdma_sendctxs_destroy(buf); |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1260 | rpcrdma_reps_destroy(buf); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1261 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1262 | while (!list_empty(&buf->rb_send_bufs)) { |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1263 | struct rpcrdma_req *req; |
Allen Andrews | 4034ba0 | 2014-05-28 10:32:09 -0400 | [diff] [blame] | 1264 | |
Chuck Lever | 92f4433 | 2018-12-19 10:59:33 -0500 | [diff] [blame] | 1265 | req = list_first_entry(&buf->rb_send_bufs, |
| 1266 | struct rpcrdma_req, rl_list); |
| 1267 | list_del(&req->rl_list); |
| 1268 | rpcrdma_req_destroy(req); |
Chuck Lever | 9f9d802 | 2014-07-29 17:24:45 -0400 | [diff] [blame] | 1269 | } |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1270 | } |
| 1271 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1272 | /** |
| 1273 | * rpcrdma_mr_get - Allocate an rpcrdma_mr object |
| 1274 | * @r_xprt: controlling transport |
| 1275 | * |
| 1276 | * Returns an initialized rpcrdma_mr or NULL if no free |
| 1277 | * rpcrdma_mr objects are available. |
| 1278 | */ |
| 1279 | struct rpcrdma_mr * |
| 1280 | rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) |
Chuck Lever | c2922c0 | 2014-07-29 17:24:36 -0400 | [diff] [blame] | 1281 | { |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1282 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 265a38d | 2019-08-19 18:44:04 -0400 | [diff] [blame] | 1283 | struct rpcrdma_mr *mr; |
Chuck Lever | 346aa66 | 2015-05-26 11:52:06 -0400 | [diff] [blame] | 1284 | |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 1285 | spin_lock(&buf->rb_lock); |
Chuck Lever | 265a38d | 2019-08-19 18:44:04 -0400 | [diff] [blame] | 1286 | mr = rpcrdma_mr_pop(&buf->rb_mrs); |
Chuck Lever | 4d6b889 | 2019-08-19 18:47:57 -0400 | [diff] [blame] | 1287 | spin_unlock(&buf->rb_lock); |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1288 | return mr; |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1289 | } |
| 1290 | |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1291 | /** |
Chuck Lever | 1ca3f4c | 2019-08-19 18:44:50 -0400 | [diff] [blame] | 1292 | * rpcrdma_mr_put - DMA unmap an MR and release it |
| 1293 | * @mr: MR to release |
Chuck Lever | 96cedde | 2017-12-14 20:57:55 -0500 | [diff] [blame] | 1294 | * |
| 1295 | */ |
Chuck Lever | 1ca3f4c | 2019-08-19 18:44:50 -0400 | [diff] [blame] | 1296 | void rpcrdma_mr_put(struct rpcrdma_mr *mr) |
Chuck Lever | ec12e47 | 2017-12-14 20:58:04 -0500 | [diff] [blame] | 1297 | { |
| 1298 | struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
| 1299 | |
Chuck Lever | e2f34e2 | 2018-12-19 10:58:13 -0500 | [diff] [blame] | 1300 | if (mr->mr_dir != DMA_NONE) { |
| 1301 | trace_xprtrdma_mr_unmap(mr); |
Chuck Lever | f19bd0b | 2019-04-24 09:40:04 -0400 | [diff] [blame] | 1302 | ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device, |
Chuck Lever | e2f34e2 | 2018-12-19 10:58:13 -0500 | [diff] [blame] | 1303 | mr->mr_sg, mr->mr_nents, mr->mr_dir); |
| 1304 | mr->mr_dir = DMA_NONE; |
| 1305 | } |
Chuck Lever | 1ca3f4c | 2019-08-19 18:44:50 -0400 | [diff] [blame] | 1306 | |
Chuck Lever | 6dc6ec9 | 2019-08-19 18:47:10 -0400 | [diff] [blame] | 1307 | rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); |
| 1308 | } |
| 1309 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1310 | /** |
| 1311 | * rpcrdma_buffer_get - Get a request buffer |
| 1312 | * @buffers: Buffer pool from which to obtain a buffer |
Chuck Lever | 78d506e | 2016-09-06 11:22:49 -0400 | [diff] [blame] | 1313 | * |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1314 | * Returns a fresh rpcrdma_req, or NULL if none are available. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1315 | */ |
| 1316 | struct rpcrdma_req * |
| 1317 | rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) |
| 1318 | { |
| 1319 | struct rpcrdma_req *req; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1320 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1321 | spin_lock(&buffers->rb_lock); |
Chuck Lever | e68699c | 2018-05-04 15:35:31 -0400 | [diff] [blame] | 1322 | req = list_first_entry_or_null(&buffers->rb_send_bufs, |
| 1323 | struct rpcrdma_req, rl_list); |
| 1324 | if (req) |
| 1325 | list_del_init(&req->rl_list); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1326 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1327 | return req; |
| 1328 | } |
| 1329 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1330 | /** |
| 1331 | * rpcrdma_buffer_put - Put request/reply buffers back into pool |
Chuck Lever | 5828ceb | 2019-06-19 10:33:36 -0400 | [diff] [blame] | 1332 | * @buffers: buffer pool |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1333 | * @req: object to return |
| 1334 | * |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1335 | */ |
Chuck Lever | 5828ceb | 2019-06-19 10:33:36 -0400 | [diff] [blame] | 1336 | void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1337 | { |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1338 | if (req->rl_reply) |
| 1339 | rpcrdma_rep_put(buffers, req->rl_reply); |
Chuck Lever | 1e465fd | 2015-10-24 17:27:02 -0400 | [diff] [blame] | 1340 | req->rl_reply = NULL; |
| 1341 | |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1342 | spin_lock(&buffers->rb_lock); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1343 | list_add(&req->rl_list, &buffers->rb_send_bufs); |
Chuck Lever | a5b027e | 2015-10-24 17:27:27 -0400 | [diff] [blame] | 1344 | spin_unlock(&buffers->rb_lock); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1345 | } |
| 1346 | |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1347 | /** |
| 1348 | * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list |
| 1349 | * @rep: rep to release |
| 1350 | * |
| 1351 | * Used after error conditions. |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1352 | */ |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1353 | void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1354 | { |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1355 | rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep); |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1356 | } |
| 1357 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1358 | /* Returns a pointer to an rpcrdma_regbuf object, or NULL.
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1359 | * |
| 1360 | * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1361 | * receiving the payload of RDMA RECV operations. During Long Calls |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 1362 | * or Replies they may be registered externally via frwr_map. |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1363 | */ |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1364 | static struct rpcrdma_regbuf * |
| 1365 | rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, |
Chuck Lever | 13650c2 | 2016-09-15 10:56:26 -0400 | [diff] [blame] | 1366 | gfp_t flags) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1367 | { |
| 1368 | struct rpcrdma_regbuf *rb; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1369 | |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1370 | rb = kmalloc(sizeof(*rb), flags); |
| 1371 | if (!rb) |
| 1372 | return NULL; |
| 1373 | rb->rg_data = kmalloc(size, flags); |
| 1374 | if (!rb->rg_data) { |
| 1375 | kfree(rb); |
| 1376 | return NULL; |
| 1377 | } |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1378 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1379 | rb->rg_device = NULL; |
Chuck Lever | 99ef4db | 2016-09-15 10:56:10 -0400 | [diff] [blame] | 1380 | rb->rg_direction = direction; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1381 | rb->rg_iov.length = size; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1382 | return rb; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1383 | } |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1384 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1385 | /** |
Chuck Lever | 0f665ce | 2019-04-24 09:39:27 -0400 | [diff] [blame] | 1386 | * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer |
| 1387 | * @rb: regbuf to reallocate |
| 1388 | * @size: size of buffer to be allocated, in bytes |
| 1389 | * @flags: GFP flags |
| 1390 | * |
| 1391 | * Returns true if reallocation was successful. If false is |
| 1392 | * returned, @rb is left untouched. |
| 1393 | */ |
| 1394 | bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags) |
| 1395 | { |
| 1396 | void *buf; |
| 1397 | |
| 1398 | buf = kmalloc(size, flags); |
| 1399 | if (!buf) |
| 1400 | return false; |
| 1401 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1402 | rpcrdma_regbuf_dma_unmap(rb); |
Chuck Lever | 0f665ce | 2019-04-24 09:39:27 -0400 | [diff] [blame] | 1403 | kfree(rb->rg_data); |
| 1404 | |
| 1405 | rb->rg_data = buf; |
| 1406 | rb->rg_iov.length = size; |
| 1407 | return true; |
| 1408 | } |
| 1409 | |
| 1410 | /** |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1411 | * __rpcrdma_regbuf_dma_map - DMA-map a regbuf |
| 1412 | * @r_xprt: controlling transport instance |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1413 | * @rb: regbuf to be mapped |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1414 | * |
| 1415 | * Returns true if the buffer is now DMA mapped to @r_xprt's device |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1416 | */ |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1417 | bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt, |
| 1418 | struct rpcrdma_regbuf *rb) |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1419 | { |
Chuck Lever | f19bd0b | 2019-04-24 09:40:04 -0400 | [diff] [blame] | 1420 | struct ib_device *device = r_xprt->rx_ia.ri_id->device; |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1421 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1422 | if (rb->rg_direction == DMA_NONE) |
| 1423 | return false; |
| 1424 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1425 | rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb), |
| 1426 | rdmab_length(rb), rb->rg_direction); |
Chuck Lever | 53b2c1c | 2018-12-19 11:00:06 -0500 | [diff] [blame] | 1427 | if (ib_dma_mapping_error(device, rdmab_addr(rb))) { |
| 1428 | trace_xprtrdma_dma_maperr(rdmab_addr(rb)); |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1429 | return false; |
Chuck Lever | 53b2c1c | 2018-12-19 11:00:06 -0500 | [diff] [blame] | 1430 | } |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1431 | |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 1432 | rb->rg_device = device; |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1433 | rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey; |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1434 | return true; |
| 1435 | } |
| 1436 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1437 | static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb) |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1438 | { |
Chuck Lever | e89e8d8f | 2018-01-31 12:34:13 -0500 | [diff] [blame] | 1439 | if (!rb) |
| 1440 | return; |
| 1441 | |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1442 | if (!rpcrdma_regbuf_is_mapped(rb)) |
| 1443 | return; |
| 1444 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1445 | ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb), |
| 1446 | rb->rg_direction); |
Chuck Lever | 54cbd6b | 2016-09-15 10:56:18 -0400 | [diff] [blame] | 1447 | rb->rg_device = NULL; |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1448 | } |
| 1449 | |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1450 | static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb) |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1451 | { |
Chuck Lever | d2832af | 2019-04-24 09:39:32 -0400 | [diff] [blame] | 1452 | rpcrdma_regbuf_dma_unmap(rb); |
Chuck Lever | 8cec3db | 2019-04-24 09:39:16 -0400 | [diff] [blame] | 1453 | if (rb) |
| 1454 | kfree(rb->rg_data); |
Chuck Lever | e531dca | 2015-08-03 13:03:20 -0400 | [diff] [blame] | 1455 | kfree(rb); |
Chuck Lever | 9128c3e | 2015-01-21 11:04:00 -0500 | [diff] [blame] | 1456 | } |
| 1457 | |
Chuck Lever | 995d312 | 2018-12-19 11:00:32 -0500 | [diff] [blame] | 1458 | /** |
| 1459 | * rpcrdma_ep_post - Post WRs to a transport's Send Queue |
| 1460 | * @ia: transport's device information |
| 1461 | * @ep: transport's RDMA endpoint information |
| 1462 | * @req: rpcrdma_req containing the Send WR to post |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1463 | * |
Chuck Lever | 995d312 | 2018-12-19 11:00:32 -0500 | [diff] [blame] | 1464 | * Returns 0 if the post was successful, otherwise -ENOTCONN.
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1466 | */ |
| 1467 | int |
| 1468 | rpcrdma_ep_post(struct rpcrdma_ia *ia, |
| 1469 | struct rpcrdma_ep *ep, |
| 1470 | struct rpcrdma_req *req) |
| 1471 | { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1472 | struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr; |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 1473 | int rc; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1474 | |
Chuck Lever | 0ab1152 | 2019-06-19 10:33:15 -0400 | [diff] [blame] | 1475 | if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) { |
Chuck Lever | ae72950 | 2017-10-20 10:48:12 -0400 | [diff] [blame] | 1476 | send_wr->send_flags |= IB_SEND_SIGNALED; |
| 1477 | ep->rep_send_count = ep->rep_send_batch; |
| 1478 | } else { |
| 1479 | send_wr->send_flags &= ~IB_SEND_SIGNALED; |
| 1480 | --ep->rep_send_count; |
| 1481 | } |
Chuck Lever | 7a89f9c | 2016-06-29 13:53:43 -0400 | [diff] [blame] | 1482 | |
Chuck Lever | 5f62412 | 2018-12-19 10:59:01 -0500 | [diff] [blame] | 1483 | rc = frwr_send(ia, req); |
Chuck Lever | ab03eff | 2017-12-20 16:30:40 -0500 | [diff] [blame] | 1484 | trace_xprtrdma_post_send(req, rc); |
| 1485 | if (rc) |
| 1486 | return -ENOTCONN; |
| 1487 | return 0; |
\"Talpey, Thomas\ | c56c65f | 2007-09-10 13:51:18 -0400 | [diff] [blame] | 1488 | } |
| 1489 | |
Chuck Lever | 2ae50ad | 2019-10-09 13:07:38 -0400 | [diff] [blame] | 1490 | /** |
| 1491 | * rpcrdma_post_recvs - Refill the Receive Queue |
| 1492 | * @r_xprt: controlling transport instance |
| 1493 | * @temp: mark Receive buffers to be deleted after use |
| 1494 | * |
| 1495 | */ |
| 1496 | void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1497 | { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1498 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1499 | struct rpcrdma_ep *ep = &r_xprt->rx_ep; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1500 | struct ib_recv_wr *i, *wr, *bad_wr; |
| 1501 | struct rpcrdma_rep *rep; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1502 | int needed, count, rc; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1503 | |
Chuck Lever | 61c208a | 2018-10-01 14:26:35 -0400 | [diff] [blame] | 1504 | rc = 0; |
| 1505 | count = 0; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1506 | |
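	/* Post enough Receives to cover the credit grant plus twice
	 * the backchannel maximum; long-lived (!temp) Receives get a
	 * full extra batch so that refills happen less frequently.
	 */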
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1507 | needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1); |
Chuck Lever | 435eba4 | 2019-08-19 18:51:49 -0400 | [diff] [blame] | 1508 | if (likely(ep->rep_receive_count > needed)) |
Chuck Lever | 61c208a | 2018-10-01 14:26:35 -0400 | [diff] [blame] | 1509 | goto out; |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1510 | needed -= ep->rep_receive_count; |
Chuck Lever | e340c2d | 2019-02-11 11:23:54 -0500 | [diff] [blame] | 1511 | if (!temp) |
| 1512 | needed += RPCRDMA_MAX_RECV_BATCH; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1513 | |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1514 | /* fast path: all needed reps can be found on the free list */ |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1515 | wr = NULL; |
| 1516 | while (needed) { |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1517 | rep = rpcrdma_rep_get_locked(buf); |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1518 | if (!rep) |
Chuck Lever | b0b227f | 2019-08-19 18:48:43 -0400 | [diff] [blame] | 1519 | rep = rpcrdma_rep_create(r_xprt, temp); |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1520 | if (!rep) |
| 1521 | break; |
| 1522 | |
| 1523 | rep->rr_recv_wr.next = wr; |
| 1524 | wr = &rep->rr_recv_wr; |
| 1525 | --needed; |
| 1526 | } |
| 1527 | if (!wr) |
Chuck Lever | 61c208a | 2018-10-01 14:26:35 -0400 | [diff] [blame] | 1528 | goto out; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1529 | |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1530 | for (i = wr; i; i = i->next) { |
| 1531 | rep = container_of(i, struct rpcrdma_rep, rr_recv_wr); |
| 1532 | |
| 1533 | if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) |
| 1534 | goto release_wrs; |
| 1535 | |
Chuck Lever | 2dfdcd8 | 2019-08-19 18:41:44 -0400 | [diff] [blame] | 1536 | trace_xprtrdma_post_recv(rep); |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1537 | ++count; |
| 1538 | } |
| 1539 | |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 1540 | rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, |
| 1541 | (const struct ib_recv_wr **)&bad_wr); |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1542 | out: |
| 1543 | trace_xprtrdma_post_recvs(r_xprt, count, rc); |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1544 | if (rc) { |
Chuck Lever | 2d0abe3 | 2019-06-19 10:32:38 -0400 | [diff] [blame] | 1545 | for (wr = bad_wr; wr;) { |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1546 | struct rpcrdma_rep *rep; |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1547 | |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1548 | rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr); |
Chuck Lever | 2d0abe3 | 2019-06-19 10:32:38 -0400 | [diff] [blame] | 1549 | wr = wr->next; |
Chuck Lever | 7c8d9e7 | 2018-05-04 15:35:20 -0400 | [diff] [blame] | 1550 | rpcrdma_recv_buffer_put(rep); |
| 1551 | --count; |
| 1552 | } |
| 1553 | } |
Chuck Lever | 6ceea36 | 2018-12-19 10:58:24 -0500 | [diff] [blame] | 1554 | ep->rep_receive_count += count; |
Chuck Lever | 9ef33ef | 2019-06-19 10:33:26 -0400 | [diff] [blame] | 1555 | return; |
| 1556 | |
| 1557 | release_wrs: |
| 1558 | for (i = wr; i;) { |
| 1559 | rep = container_of(i, struct rpcrdma_rep, rr_recv_wr); |
| 1560 | i = i->next; |
| 1561 | rpcrdma_recv_buffer_put(rep); |
| 1562 | } |
Chuck Lever | f531a5d | 2015-10-24 17:27:43 -0400 | [diff] [blame] | 1563 | } |