// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040066/*
67 * Globals/Macros
68 */
69
Jeff Laytonf895b252014-11-17 16:58:04 -050070#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040071# define RPCDBG_FACILITY RPCDBG_TRANS
72#endif
73
74/*
75 * internal functions
76 */
Chuck Levercb586de2020-01-03 11:56:32 -050077static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
78static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
Chuck Leverf9958792019-10-17 14:31:18 -040079static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
80 struct rpcrdma_sendctx *sc);
Chuck Leverb78de1d2020-01-03 11:56:53 -050081static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
Chuck Levera31b2f92019-10-09 13:07:27 -040082static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
Chuck Lever5030c9a2021-04-19 14:02:16 -040083static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
84 struct rpcrdma_rep *rep);
Chuck Lever85810382020-01-03 11:56:58 -050085static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
Chuck Lever671c4502020-01-03 11:52:22 -050086static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
Chuck Lever96cedde2017-12-14 20:57:55 -050087static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
Chuck Lever9d2da4f2019-10-09 13:07:48 -040088static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
Chuck Lever2acc5ca2020-06-15 09:20:52 -040089static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
90static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
Chuck Leverd2832af2019-04-24 09:39:32 -040091static struct rpcrdma_regbuf *
92rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
93 gfp_t flags);
94static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
95static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040096
/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rdma_cm_id *id = ep->re_id;

	/* Wait for rpcrdma_post_recvs() to leave its critical
	 * section.
	 */
	if (atomic_inc_return(&ep->re_receiving) > 1)
		wait_for_completion(&ep->re_done);

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(id->qp);

	rpcrdma_ep_put(ep);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context. The QP is always destroyed before the ID, so the ID will be
 * reliably available when this handler is invoked.
 */
static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	trace_xprtrdma_qp_event(ep, event);
}

/* Ensure xprt_force_disconnect() is invoked exactly once when a
 * connection is closed or lost. (The important thing is it needs
 * to be invoked "at least" once).
 */
static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
{
	if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
		xprt_force_disconnect(ep->re_xprt);
}

/**
 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @r_xprt: transport to disconnect
 * @wc: work completion entry
 *
 * Must be called in process context.
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		rpcrdma_force_disconnect(r_xprt->rx_ep);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: WCE for a completed Send WR
 *
 */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
	struct rpcrdma_xprt *r_xprt = cq->cq_context;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(wc, &sc->sc_cid);
	rpcrdma_sendctx_put_locked(r_xprt, sc);
	rpcrdma_flush_disconnect(r_xprt, wc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: WCE for a completed Receive WR
 *
 */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = cq->cq_context;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc, &rep->rr_cid);
	--r_xprt->rx_ep->re_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_flush_disconnect(r_xprt, wc);
	rpcrdma_rep_put(&r_xprt->rx_buf, rep);
}

static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
				      struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	ep->re_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		ep->re_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < ep->re_inline_recv)
		ep->re_inline_recv = rsize;
	if (wsize < ep->re_inline_send)
		ep->re_inline_send = wsize;

	rpcrdma_set_max_header_sizes(ep);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
	struct rpcrdma_ep *ep = id->context;

	might_sleep();

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ep->re_async_rc = 0;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ep->re_async_rc = -EPROTO;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ep->re_async_rc = -ENETUNREACH;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		pr_info("rpcrdma: removing device %s for %pISpc\n",
			ep->re_id->device->name, sap);
		fallthrough;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ep->re_connect_status = -ENODEV;
		goto disconnected;
	case RDMA_CM_EVENT_ESTABLISHED:
		rpcrdma_ep_get(ep);
		ep->re_connect_status = 1;
		rpcrdma_update_cm_private(ep, &event->param.conn);
		trace_xprtrdma_inline_thresh(ep);
		wake_up_all(&ep->re_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->re_connect_status = -ENOTCONN;
		goto wake_connect_worker;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->re_connect_status = -ENETUNREACH;
		goto wake_connect_worker;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
			sap, rdma_reject_msg(id, event->status));
		ep->re_connect_status = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->re_connect_status = -ENOTCONN;
wake_connect_worker:
		wake_up_all(&ep->re_connect_wait);
		return 0;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->re_connect_status = -ECONNABORTED;
disconnected:
		rpcrdma_force_disconnect(ep);
		return rpcrdma_ep_put(ep);
	default:
		break;
	}

	dprintk("RPC:       %s: %pISpc on %s/frwr: %s\n", __func__, sap,
		ep->re_id->device->name, rdma_event_msg(event->event));
	return 0;
}

static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
					    struct rpcrdma_ep *ep)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ep->re_done);

	id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ep->re_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
	if (rc < 0)
		goto out;

	rc = ep->re_async_rc;
	if (rc)
		goto out;

	ep->re_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
	if (rc < 0)
		goto out;
	rc = ep->re_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

static void rpcrdma_ep_destroy(struct kref *kref)
{
	struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);

	if (ep->re_id->qp) {
		rdma_destroy_qp(ep->re_id);
		ep->re_id->qp = NULL;
	}

	if (ep->re_attr.recv_cq)
		ib_free_cq(ep->re_attr.recv_cq);
	ep->re_attr.recv_cq = NULL;
	if (ep->re_attr.send_cq)
		ib_free_cq(ep->re_attr.send_cq);
	ep->re_attr.send_cq = NULL;

	if (ep->re_pd)
		ib_dealloc_pd(ep->re_pd);
	ep->re_pd = NULL;

	kfree(ep);
	module_put(THIS_MODULE);
}

static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
{
	kref_get(&ep->re_kref);
}

/* Returns:
 *     %0 if @ep still has a positive kref count, or
 *     %1 if @ep was destroyed successfully.
 */
static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
	return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400402
Chuck Lever81fe0c52020-02-21 17:00:38 -0500403static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400404{
Chuck Levere28ce902020-02-21 17:01:05 -0500405 struct rpcrdma_connect_private *pmsg;
406 struct ib_device *device;
Chuck Lever81fe0c52020-02-21 17:00:38 -0500407 struct rdma_cm_id *id;
Chuck Levere28ce902020-02-21 17:01:05 -0500408 struct rpcrdma_ep *ep;
Chuck Lever2fa8f882016-03-04 11:28:53 -0500409 int rc;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400410
Chuck Levere28ce902020-02-21 17:01:05 -0500411 ep = kzalloc(sizeof(*ep), GFP_NOFS);
412 if (!ep)
Chuck Leverdda9a952020-06-27 12:35:15 -0400413 return -ENOTCONN;
Chuck Levere28ce902020-02-21 17:01:05 -0500414 ep->re_xprt = &r_xprt->rx_xprt;
415 kref_init(&ep->re_kref);
416
Chuck Lever93aa8e02020-02-21 17:00:54 -0500417 id = rpcrdma_create_id(r_xprt, ep);
Chuck Levere28ce902020-02-21 17:01:05 -0500418 if (IS_ERR(id)) {
Chuck Lever85bfd712020-06-27 12:35:04 -0400419 kfree(ep);
420 return PTR_ERR(id);
Chuck Levere28ce902020-02-21 17:01:05 -0500421 }
422 __module_get(THIS_MODULE);
423 device = id->device;
424 ep->re_id = id;
Chuck Lever15788d12021-04-19 14:02:09 -0400425 reinit_completion(&ep->re_done);
Chuck Lever81fe0c52020-02-21 17:00:38 -0500426
Chuck Lever93aa8e02020-02-21 17:00:54 -0500427 ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
428 ep->re_inline_send = xprt_rdma_max_inline_write;
429 ep->re_inline_recv = xprt_rdma_max_inline_read;
Chuck Levere28ce902020-02-21 17:01:05 -0500430 rc = frwr_query_device(ep, device);
Chuck Lever914fcad2018-05-04 15:34:48 -0400431 if (rc)
Chuck Lever81fe0c52020-02-21 17:00:38 -0500432 goto out_destroy;
433
Chuck Lever93aa8e02020-02-21 17:00:54 -0500434 r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400435
Chuck Lever93aa8e02020-02-21 17:00:54 -0500436 ep->re_attr.event_handler = rpcrdma_qp_event_handler;
437 ep->re_attr.qp_context = ep;
438 ep->re_attr.srq = NULL;
439 ep->re_attr.cap.max_inline_data = 0;
440 ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
441 ep->re_attr.qp_type = IB_QPT_RC;
442 ep->re_attr.port_num = ~0;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400443
444 dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
445 "iovs: send %d recv %d\n",
446 __func__,
Chuck Lever93aa8e02020-02-21 17:00:54 -0500447 ep->re_attr.cap.max_send_wr,
448 ep->re_attr.cap.max_recv_wr,
449 ep->re_attr.cap.max_send_sge,
450 ep->re_attr.cap.max_recv_sge);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400451
Chuck Lever93aa8e02020-02-21 17:00:54 -0500452 ep->re_send_batch = ep->re_max_requests >> 3;
453 ep->re_send_count = ep->re_send_batch;
454 init_waitqueue_head(&ep->re_connect_wait);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400455
Chuck Levere28ce902020-02-21 17:01:05 -0500456 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
Chuck Lever93aa8e02020-02-21 17:00:54 -0500457 ep->re_attr.cap.max_send_wr,
458 IB_POLL_WORKQUEUE);
459 if (IS_ERR(ep->re_attr.send_cq)) {
460 rc = PTR_ERR(ep->re_attr.send_cq);
Chuck Lever85cd8e22020-02-21 17:00:12 -0500461 goto out_destroy;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400462 }
463
Chuck Levere28ce902020-02-21 17:01:05 -0500464 ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
Chuck Lever93aa8e02020-02-21 17:00:54 -0500465 ep->re_attr.cap.max_recv_wr,
466 IB_POLL_WORKQUEUE);
467 if (IS_ERR(ep->re_attr.recv_cq)) {
468 rc = PTR_ERR(ep->re_attr.recv_cq);
Chuck Lever85cd8e22020-02-21 17:00:12 -0500469 goto out_destroy;
Chuck Leverfc664482014-05-28 10:33:25 -0400470 }
Chuck Lever93aa8e02020-02-21 17:00:54 -0500471 ep->re_receive_count = 0;
Chuck Leverfc664482014-05-28 10:33:25 -0400472
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400473 /* Initialize cma parameters */
Chuck Lever93aa8e02020-02-21 17:00:54 -0500474 memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400475
Chuck Lever87cfb9a2016-09-15 10:57:07 -0400476 /* Prepare RDMA-CM private message */
Chuck Levere28ce902020-02-21 17:01:05 -0500477 pmsg = &ep->re_cm_private;
Chuck Lever87cfb9a2016-09-15 10:57:07 -0400478 pmsg->cp_magic = rpcrdma_cmp_magic;
479 pmsg->cp_version = RPCRDMA_CMP_VERSION;
Chuck Lever5f624122018-12-19 10:59:01 -0500480 pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
Chuck Lever93aa8e02020-02-21 17:00:54 -0500481 pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
482 pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
483 ep->re_remote_cma.private_data = pmsg;
484 ep->re_remote_cma.private_data_len = sizeof(*pmsg);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400485
486 /* Client offers RDMA Read but does not initiate */
Chuck Lever93aa8e02020-02-21 17:00:54 -0500487 ep->re_remote_cma.initiator_depth = 0;
488 ep->re_remote_cma.responder_resources =
Chuck Levere28ce902020-02-21 17:01:05 -0500489 min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400490
Chuck Leverb2dde942016-05-02 14:43:03 -0400491 /* Limit transport retries so client can detect server
492 * GID changes quickly. RPC layer handles re-establishing
493 * transport connection and retransmission.
494 */
Chuck Lever93aa8e02020-02-21 17:00:54 -0500495 ep->re_remote_cma.retry_count = 6;
Chuck Leverb2dde942016-05-02 14:43:03 -0400496
497 /* RPC-over-RDMA handles its own flow control. In addition,
498 * make all RNR NAKs visible so we know that RPC-over-RDMA
499 * flow control is working correctly (no NAKs should be seen).
500 */
Chuck Lever93aa8e02020-02-21 17:00:54 -0500501 ep->re_remote_cma.flow_control = 0;
502 ep->re_remote_cma.rnr_retry_count = 0;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400503
Chuck Levere28ce902020-02-21 17:01:05 -0500504 ep->re_pd = ib_alloc_pd(device, 0);
Chuck Lever93aa8e02020-02-21 17:00:54 -0500505 if (IS_ERR(ep->re_pd)) {
506 rc = PTR_ERR(ep->re_pd);
Chuck Lever9ba373e2020-02-21 17:00:33 -0500507 goto out_destroy;
508 }
509
Chuck Lever93aa8e02020-02-21 17:00:54 -0500510 rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
Chuck Lever85cd8e22020-02-21 17:00:12 -0500511 if (rc)
512 goto out_destroy;
Chuck Lever93aa8e02020-02-21 17:00:54 -0500513
Chuck Levere28ce902020-02-21 17:01:05 -0500514 r_xprt->rx_ep = ep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400515 return 0;
516
Chuck Lever85cd8e22020-02-21 17:00:12 -0500517out_destroy:
Chuck Lever2acc5ca2020-06-15 09:20:52 -0400518 rpcrdma_ep_put(ep);
Chuck Lever81fe0c52020-02-21 17:00:38 -0500519 rdma_destroy_id(id);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400520 return rc;
521}
522
/**
 * rpcrdma_xprt_connect - Connect an unconnected transport
 * @r_xprt: controlling transport instance
 *
 * Returns 0 on success or a negative errno.
 */
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_ep *ep;
	int rc;

	rc = rpcrdma_ep_create(r_xprt);
	if (rc)
		return rc;
	ep = r_xprt->rx_ep;

	xprt_clear_connected(xprt);
	rpcrdma_reset_cwnd(r_xprt);

	/* Bump the ep's reference count while there are
	 * outstanding Receives.
	 */
	rpcrdma_ep_get(ep);
	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
	if (rc)
		goto out;

	if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
		xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	wait_event_interruptible(ep->re_connect_wait,
				 ep->re_connect_status != 0);
	if (ep->re_connect_status <= 0) {
		rc = ep->re_connect_status;
		goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc) {
		rc = -ENOTCONN;
		goto out;
	}

	rc = rpcrdma_reqs_setup(r_xprt);
	if (rc) {
		rc = -ENOTCONN;
		goto out;
	}
	rpcrdma_mrs_create(r_xprt);

out:
	trace_xprtrdma_connect(r_xprt, rc);
	return rc;
}

/**
 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * @r_xprt: controlling transport instance
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 *
 * On return, @r_xprt is completely divested of all hardware
 * resources and prepared for the next ->connect operation.
 */
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rdma_cm_id *id;
	int rc;

	if (!ep)
		return;

	id = ep->re_id;
	rc = rdma_disconnect(id);
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
	rpcrdma_reps_unmap(r_xprt);
	rpcrdma_reqs_reset(r_xprt);
	rpcrdma_mrs_destroy(r_xprt);
	rpcrdma_sendctxs_destroy(r_xprt);

	if (rpcrdma_ep_put(ep))
		rdma_destroy_id(id);

	r_xprt->rx_ep = NULL;
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
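
/* Usage sketch (illustrative only, based on the callers in this file):
 * the consumer side pairs a get with a later put --
 *
 *	sc = rpcrdma_sendctx_get_locked(r_xprt);
 *	if (!sc)
 *		return -EAGAIN;		(queue empty: caller backs off)
 *	... fill sc->sc_sges and post the Send WR ...
 *
 * and the producer side, rpcrdma_wc_send(), eventually returns the
 * context with rpcrdma_sendctx_put_locked(r_xprt, sc).
 */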

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long i;

	if (!buf->rb_sc_ctxs)
		return;
	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
	buf->rb_sc_ctxs = NULL;
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_cqe.done = rpcrdma_wc_send;
	sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;
	sc->sc_cid.ci_completion_id =
		atomic_inc_return(&ep->re_completion_ids);
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
		if (!sc)
			return -ENOMEM;

		buf->rb_sc_ctxs[i] = sc;
	}

	buf->rb_sc_head = 0;
	buf->rb_sc_tail = 0;
	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
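
/* Worked example (illustrative): with re_max_requests +
 * RPCRDMA_MAX_BC_REQUESTS == 5 slots, rb_sc_last is 4 and successive
 * calls advance an index 0 -> 1 -> 2 -> 3 -> 4 -> 0, wrapping without
 * a modulus operation.
 */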

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int count;

	for (count = 0; count < ep->re_max_rdma_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_NOFS);
		if (!mr)
			break;

		rc = frwr_mr_init(r_xprt, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		spin_lock(&buf->rb_lock);
		rpcrdma_mr_push(mr, &buf->rb_mrs);
		list_add(&mr->mr_all, &buf->rb_all_mrs);
		spin_unlock(&buf->rb_lock);
	}

	r_xprt->rx_stats.mrs_allocated += count;
	trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
	xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;

	/* If there is no underlying connection, it's no use
	 * to wake the refresh worker.
	 */
	if (ep->re_connect_status == 1) {
		/* The work is scheduled on a WQ_MEM_RECLAIM
		 * workqueue in order to prevent MR allocation
		 * from recursing into NFS during direct reclaim.
		 */
		queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
	}
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out2;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out3;

	INIT_LIST_HEAD(&req->rl_free_mrs);
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out3:
	kfree(req->rl_sendbuf);
out2:
	kfree(req);
out1:
	return NULL;
}

/**
 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req object to set up
 *
 * Returns zero on success, and a negative errno on failure.
 */
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_regbuf *rb;
	size_t maxhdrsize;

	/* Compute maximum header buffer size in bytes */
	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
		     r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
	maxhdrsize *= sizeof(__be32);
	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (!rb)
		goto out;

	if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_free;

	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
	return 0;

out_free:
	rpcrdma_regbuf_free(rb);
out:
	return -ENOMEM;
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. E.g. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	int rc;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rc = rpcrdma_req_setup(r_xprt, req);
		if (rc)
			return rc;
	}
	return 0;
}

static void rpcrdma_req_reset(struct rpcrdma_req *req)
{
	/* Credits are valid for only one connection */
	req->rl_slot.rq_cong = 0;

	rpcrdma_regbuf_free(req->rl_rdmabuf);
	req->rl_rdmabuf = NULL;

	rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
	rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);

	frwr_reset(req);
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. E.g. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all)
		rpcrdma_req_reset(req);
}

/* No locking needed here. This function is called only by the
 * Receive completion handler.
 */
static noinline
struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
				       bool temp)
{
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
		goto out_free_regbuf;

	rep->rr_cid.ci_completion_id =
		atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);

	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;
	list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
	return rep;

out_free_regbuf:
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
out_free:
	kfree(rep);
out:
	return NULL;
}

static void rpcrdma_rep_free(struct rpcrdma_rep *rep)
{
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buf = &rep->rr_rxprt->rx_buf;

	spin_lock(&buf->rb_lock);
	list_del(&rep->rr_all);
	spin_unlock(&buf->rb_lock);

	rpcrdma_rep_free(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
	struct llist_node *node;

	/* Calls to llist_del_first are required to be serialized */
	node = llist_del_first(&buf->rb_free_reps);
	if (!node)
		return NULL;
	return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
			    struct rpcrdma_rep *rep)
{
	llist_add(&rep->rr_node, &buf->rb_free_reps);
}

/* Caller must ensure the QP is quiescent (RQ is drained) before
 * invoking this function, to guarantee rb_all_reps is not
 * changing.
 */
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;

	list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
		rep->rr_temp = true;	/* Mark this rep for destruction */
	}
}

static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	spin_lock(&buf->rb_lock);
	while ((rep = list_first_entry_or_null(&buf->rb_all_reps,
					       struct rpcrdma_rep,
					       rr_all)) != NULL) {
		list_del(&rep->rr_all);
		spin_unlock(&buf->rb_lock);

		rpcrdma_rep_free(rep);

		spin_lock(&buf->rb_lock);
	}
	spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all_mrs);
	INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	INIT_LIST_HEAD(&buf->rb_all_reps);

	rc = -ENOMEM;
	for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	init_llist_head(&buf->rb_free_reps);

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_all_reqs safely.
 */
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	list_del(&req->rl_all);

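	/* Release any MRs still cached on this req: unlink each from
	 * the transport's rb_all_mrs list, then free its underlying
	 * hardware resources.
	 */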
	while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
		struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;

		spin_lock(&buf->rb_lock);
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);
	}

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}

/**
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * @r_xprt: controlling transport instance
 *
 * Relies on caller holding the transport send lock to protect
 * removing mr->mr_list from req->rl_free_mrs safely.
 */
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	cancel_work_sync(&buf->rb_refresh_worker);

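	/* Pop each MR off rb_all_mrs under rb_lock; the lock is
	 * released around each frwr_release_mr() call and re-taken
	 * before fetching the next MR.
	 */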
	spin_lock(&buf->rb_lock);
	while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
					      struct rpcrdma_mr,
					      mr_all)) != NULL) {
		list_del(&mr->mr_list);
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);

		spin_lock(&buf->rb_lock);
	}
	spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	rpcrdma_reps_destroy(buf);

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_lock);
	mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_lock);
	return mr;
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	if (req->rl_reply)
		rpcrdma_rep_put(buffers, req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ep->re_id->device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
	return true;
}

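/* Release a regbuf's DMA mapping, if it has one. Tolerates a NULL
 * or never-mapped @rb.
 */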
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

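/* Unmap and free a regbuf. rpcrdma_regbuf_dma_unmap() handles a
 * NULL @rb itself, so the unmap can safely precede the NULL check
 * that guards the kfree of rb->rg_data.
 */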
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}

/**
 * rpcrdma_post_sends - Post WRs to a transport's Send Queue
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	int rc;

	if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = ep->re_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->re_send_count;
	}

	trace_xprtrdma_post_send(req);
	rc = frwr_send(r_xprt, req);
	if (rc)
		return -ENOTCONN;
	return 0;
}

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @temp: mark Receive buffers to be deleted after use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_recv_wr *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int needed, count, rc;

	rc = 0;
	count = 0;

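	/* Keep at least rb_credits plus twice the backchannel maximum
	 * Receives posted; when the reps are not temporary, request an
	 * extra batch beyond the immediate need.
	 */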
	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (likely(ep->re_receive_count > needed))
		goto out;
	needed -= ep->re_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

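	/* Serialize posting: if another caller is already refilling
	 * the Receive Queue, let it finish the work.
	 */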
	if (atomic_inc_return(&ep->re_receiving) > 1)
		goto out;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	while (needed) {
		rep = rpcrdma_rep_get_locked(buf);
		if (rep && rep->rr_temp) {
			rpcrdma_rep_destroy(rep);
			continue;
		}
		if (!rep)
			rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;

		rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
		trace_xprtrdma_post_recv(rep);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
		++count;
	}
	if (!wr)
		goto out;

	rc = ib_post_recv(ep->re_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
	if (atomic_dec_return(&ep->re_receiving) > 0)
		complete(&ep->re_done);

out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
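	/* On a posting error, walk the chain of un-posted WRs and
	 * return each associated rep to the free list.
	 */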
	if (rc) {
		for (wr = bad_wr; wr;) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			wr = wr->next;
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->re_receive_count += count;
	return;
}