// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
        struct workqueue_struct *recv_wq;

        recv_wq = alloc_workqueue("xprtrdma_receive",
                                  WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                  0);
        if (!recv_wq)
                return -ENOMEM;

        rpcrdma_receive_wq = recv_wq;
        return 0;
}

void
rpcrdma_destroy_wq(void)
{
        struct workqueue_struct *wq;

        if (rpcrdma_receive_wq) {
                wq = rpcrdma_receive_wq;
                rpcrdma_receive_wq = NULL;
                destroy_workqueue(wq);
        }
}

/**
 * rpcrdma_disconnect_worker - Force a disconnect
 * @work: endpoint to be disconnected
 *
 * Provider callbacks can possibly run in an IRQ context. This function
 * is invoked in a worker thread to guarantee that disconnect wake-up
 * calls are always done in process context.
 */
static void
rpcrdma_disconnect_worker(struct work_struct *work)
{
        struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
                                             rep_disconnect_worker.work);
        struct rpcrdma_xprt *r_xprt =
                container_of(ep, struct rpcrdma_xprt, rx_ep);

        xprt_force_disconnect(&r_xprt->rx_xprt);
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;
        struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
                                                   rx_ep);

        trace_xprtrdma_qp_error(r_xprt, event);
        pr_err("rpcrdma: %s on device %s ep %p\n",
               ib_event_msg(event->event), event->device->name, context);

        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                schedule_delayed_work(&ep->rep_disconnect_worker, 0);
                wake_up_all(&ep->rep_connect_wait);
        }
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(sc, wc);
        if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);

        rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_receive(wc);
        if (wc->status != IB_WC_SUCCESS)
                goto out_fail;

        /* status == SUCCESS means all fields in wc are trustworthy */
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
        rpcrdma_reply_handler(rep);
        return;

out_fail:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
        goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
                               struct rdma_conn_param *param)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                r_xprt->rx_ia.ri_implicit_roundup = true;
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < cdata->inline_rsize)
                cdata->inline_rsize = rsize;
        if (wsize < cdata->inline_wsize)
                cdata->inline_wsize = wsize;
        dprintk("RPC: %s: max send %u, max recv %u\n",
                __func__, cdata->inline_wsize, cdata->inline_rsize);
        rpcrdma_set_max_header_sizes(r_xprt);
}
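
/* Note: the negotiation above can only lower the inline thresholds.
 * A peer that advertises receive or send buffers larger than the
 * locally configured inline_rsize/inline_wsize does not raise those
 * values.
 */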

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rpcrdma_xprt *r_xprt = id->context;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;

        might_sleep();

        trace_xprtrdma_cm_event(r_xprt, event);
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ia->ri_async_rc = 0;
                complete(&ia->ri_done);
                return 0;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ia->ri_async_rc = -EPROTO;
                complete(&ia->ri_done);
                return 0;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ia->ri_async_rc = -ENETUNREACH;
                complete(&ia->ri_done);
                return 0;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
                pr_info("rpcrdma: removing device %s for %s:%s\n",
                        ia->ri_device->name,
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
                set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
                ep->rep_connected = -ENODEV;
                xprt_force_disconnect(xprt);
                wait_for_completion(&ia->ri_remove_done);

                ia->ri_id = NULL;
                ia->ri_device = NULL;
                /* Return 1 to ensure the core destroys the id. */
                return 1;
        case RDMA_CM_EVENT_ESTABLISHED:
                ++xprt->connect_cookie;
                ep->rep_connected = 1;
                rpcrdma_update_connect_private(r_xprt, &event->param.conn);
                wake_up_all(&ep->rep_connect_wait);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->rep_connected = -ENOTCONN;
                goto disconnected;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->rep_connected = -ENETUNREACH;
                goto disconnected;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
                        rdma_reject_msg(id, event->status));
                ep->rep_connected = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        ep->rep_connected = -EAGAIN;
                goto disconnected;
        case RDMA_CM_EVENT_DISCONNECTED:
                ++xprt->connect_cookie;
                ep->rep_connected = -ECONNABORTED;
disconnected:
                xprt_force_disconnect(xprt);
                wake_up_all(&ep->rep_connect_wait);
                break;
        default:
                break;
        }

        dprintk("RPC: %s: %s:%s on %s/%s: %s\n", __func__,
                rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
                ia->ri_device->name, ia->ri_ops->ro_displayname,
                rdma_event_msg(event->event));
        return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rdma_cm_id *id;
        int rc;

        trace_xprtrdma_conn_start(xprt);

        init_completion(&ia->ri_done);
        init_completion(&ia->ri_remove_done);

        id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
                            xprt, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                dprintk("RPC: %s: rdma_create_id() failed %i\n",
                        __func__, rc);
                return id;
        }

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL,
                               (struct sockaddr *)&xprt->rx_xprt.addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
                        __func__, rc);
                goto out;
        }
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0) {
                trace_xprtrdma_conn_tout(xprt);
                goto out;
        }

        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
                        __func__, rc);
                goto out;
        }
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0) {
                trace_xprtrdma_conn_tout(xprt);
                goto out;
        }
        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        int rc;

        ia->ri_id = rpcrdma_create_id(xprt, ia);
        if (IS_ERR(ia->ri_id)) {
                rc = PTR_ERR(ia->ri_id);
                goto out_err;
        }
        ia->ri_device = ia->ri_id->device;

        ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
                goto out_err;
        }

        switch (xprt_rdma_memreg_strategy) {
        case RPCRDMA_FRWR:
                if (frwr_is_supported(ia)) {
                        ia->ri_ops = &rpcrdma_frwr_memreg_ops;
                        break;
                }
                /*FALLTHROUGH*/
        case RPCRDMA_MTHCAFMR:
                if (fmr_is_supported(ia)) {
                        ia->ri_ops = &rpcrdma_fmr_memreg_ops;
                        break;
                }
                /*FALLTHROUGH*/
        default:
                pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
                       ia->ri_device->name, xprt_rdma_memreg_strategy);
                rc = -EINVAL;
                goto out_err;
        }

        return 0;

out_err:
        rpcrdma_ia_close(ia);
        return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        struct rpcrdma_rep *rep;

        cancel_delayed_work_sync(&buf->rb_refresh_worker);

        /* This is similar to rpcrdma_ep_destroy, but:
         * - Don't cancel the connect worker.
         * - Don't call rpcrdma_ep_disconnect, which waits
         *   for another conn upcall, which will deadlock.
         * - rdma_disconnect is unneeded, the underlying
         *   connection is already gone.
         */
        if (ia->ri_id->qp) {
                ib_drain_qp(ia->ri_id->qp);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
        ib_free_cq(ep->rep_attr.recv_cq);
        ep->rep_attr.recv_cq = NULL;
        ib_free_cq(ep->rep_attr.send_cq);
        ep->rep_attr.send_cq = NULL;

        /* The ULP is responsible for ensuring all DMA
         * mappings and MRs are gone.
         */
        list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
                rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
                rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
                rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
        }
        rpcrdma_mrs_destroy(buf);
        ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;

        /* Allow waiters to continue */
        complete(&ia->ri_remove_done);

        trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rdma_destroy_id(ia->ri_id);
        }
        ia->ri_id = NULL;
        ia->ri_device = NULL;

        /* If the pd is still busy, xprtrdma missed freeing a resource */
        if (ia->ri_pd && !IS_ERR(ia->ri_pd))
                ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                  struct rpcrdma_create_data_internal *cdata)
{
        struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
        struct ib_cq *sendcq, *recvcq;
        unsigned int max_sge;
        int rc;

        max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
                return -ENOMEM;
        }
        ia->ri_max_send_sges = max_sge;

        rc = ia->ri_ops->ro_open(ia, ep, cdata);
        if (rc)
                return rc;

        ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
        ep->rep_attr.qp_context = ep;
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_sge = max_sge;
        ep->rep_attr.cap.max_recv_sge = 1;
        ep->rep_attr.cap.max_inline_data = 0;
        ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->rep_attr.qp_type = IB_QPT_RC;
        ep->rep_attr.port_num = ~0;

        dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->rep_attr.cap.max_send_wr,
                ep->rep_attr.cap.max_recv_wr,
                ep->rep_attr.cap.max_send_sge,
                ep->rep_attr.cap.max_recv_sge);

        /* set trigger for requesting send completion */
        ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
                                   cdata->max_requests >> 2);
        ep->rep_send_count = ep->rep_send_batch;
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
                          rpcrdma_disconnect_worker);

        sendcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_send_wr + 1,
                             1, IB_POLL_WORKQUEUE);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC: %s: failed to create send CQ: %i\n",
                        __func__, rc);
                goto out1;
        }

        recvcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_recv_wr + 1,
                             0, IB_POLL_WORKQUEUE);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC: %s: failed to create recv CQ: %i\n",
                        __func__, rc);
                goto out2;
        }

        ep->rep_attr.send_cq = sendcq;
        ep->rep_attr.recv_cq = recvcq;

        /* Initialize cma parameters */
        memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
        ep->rep_remote_cma.private_data = pmsg;
        ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->rep_remote_cma.initiator_depth = 0;
        ep->rep_remote_cma.responder_resources =
                min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->rep_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->rep_remote_cma.flow_control = 0;
        ep->rep_remote_cma.rnr_retry_count = 0;

        return 0;

out2:
        ib_free_cq(sendcq);
out1:
        return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        cancel_delayed_work_sync(&ep->rep_disconnect_worker);

        if (ia->ri_id && ia->ri_id->qp) {
                rpcrdma_ep_disconnect(ep, ia);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }

        if (ep->rep_attr.recv_cq)
                ib_free_cq(ep->rep_attr.recv_cq);
        if (ep->rep_attr.send_cq)
                ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                         struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc, err;

        trace_xprtrdma_reinsert(r_xprt);

        rc = -EHOSTUNREACH;
        if (rpcrdma_ia_open(r_xprt))
                goto out1;

        rc = -ENOMEM;
        err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
        if (err) {
                pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
                goto out2;
        }

        rc = -ENETUNREACH;
        err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
        if (err) {
                pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
                goto out3;
        }

        rpcrdma_mrs_create(r_xprt);
        return 0;

out3:
        rpcrdma_ep_destroy(ep, ia);
out2:
        rpcrdma_ia_close(ia);
out1:
        return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
                     struct rpcrdma_ia *ia)
{
        struct rdma_cm_id *id, *old;
        int err, rc;

        trace_xprtrdma_reconnect(r_xprt);

        rpcrdma_ep_disconnect(ep, ia);

        rc = -EHOSTUNREACH;
        id = rpcrdma_create_id(r_xprt, ia);
        if (IS_ERR(id))
                goto out;

        /* As long as the new ID points to the same device as the
         * old ID, we can reuse the transport's existing PD and all
         * previously allocated MRs. Also, the same device means
         * the transport's previous DMA mappings are still valid.
         *
         * This is a sanity check only. There should be no way these
         * point to two different devices here.
         */
        old = id;
        rc = -ENETUNREACH;
        if (ia->ri_device != id->device) {
                pr_err("rpcrdma: can't reconnect on different device!\n");
                goto out_destroy;
        }

        err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
        if (err) {
                dprintk("RPC: %s: rdma_create_qp returned %d\n",
                        __func__, err);
                goto out_destroy;
        }

        /* Atomically replace the transport's ID and QP. */
        rc = 0;
        old = ia->ri_id;
        ia->ri_id = id;
        rdma_destroy_qp(old);

out_destroy:
        rdma_destroy_id(old);
out:
        return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        int rc;

retry:
        switch (ep->rep_connected) {
        case 0:
                dprintk("RPC: %s: connecting...\n", __func__);
                rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC: %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        rc = -ENETUNREACH;
                        goto out_noupdate;
                }
                break;
        case -ENODEV:
                rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
                if (rc)
                        goto out_noupdate;
                break;
        default:
                rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
                if (rc)
                        goto out;
        }

        ep->rep_connected = 0;
        xprt_clear_connected(xprt);

        rpcrdma_post_recvs(r_xprt, true);

        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
        if (rc) {
                dprintk("RPC: %s: rdma_connect() failed with %i\n",
                        __func__, rc);
                goto out;
        }

        wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
        if (ep->rep_connected <= 0) {
                if (ep->rep_connected == -EAGAIN)
                        goto retry;
                rc = ep->rep_connected;
                goto out;
        }

        dprintk("RPC: %s: connected\n", __func__);

out:
        if (rc)
                ep->rep_connected = rc;

out_noupdate:
        return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        rc = rdma_disconnect(ia->ri_id);
        if (!rc)
                /* returns without wait if not connected */
                wait_event_interruptible(ep->rep_connect_wait,
                                         ep->rep_connected != 1);
        else
                ep->rep_connected = rc;
        trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
                                               rx_ep), rc);

        ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
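
/* For illustration: the head index never advances onto the tail index.
 * With four contexts and rb_sc_head == rb_sc_tail == 0, three Sends can
 * dequeue contexts 1, 2, and 3; a fourth attempt would wrap the head
 * onto the tail and is reported as "queue empty". A ring of N contexts
 * thus supports at most N - 1 concurrently outstanding Sends.
 */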

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
        unsigned long i;

        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(sizeof(*sc) +
                     ia->ri_max_send_sges * sizeof(struct ib_sge),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_wr.wr_cqe = &sc->sc_cqe;
        sc->sc_wr.sg_list = sc->sc_sges;
        sc->sc_wr.opcode = IB_WR_SEND;
        sc->sc_cqe.done = rpcrdma_wc_send;
        return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
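        /* For illustration only (hypothetical values, not the actual
         * constants): if rb_max_requests were 128 and
         * RPCRDMA_MAX_BC_REQUESTS were 8, 136 sendctxs would be
         * allocated below and rb_sc_last would be set to 135.
         */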
897 i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
898 dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
899 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
900 if (!buf->rb_sc_ctxs)
901 return -ENOMEM;
902
903 buf->rb_sc_last = i - 1;
904 for (i = 0; i <= buf->rb_sc_last; i++) {
905 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
906 if (!sc)
907 goto out_destroy;
908
909 sc->sc_xprt = r_xprt;
910 buf->rb_sc_ctxs[i] = sc;
911 }
Chuck Lever2fad6592018-05-04 15:35:57 -0400912 buf->rb_flags = 0;
Chuck Leverae729502017-10-20 10:48:12 -0400913
914 return 0;
915
916out_destroy:
917 rpcrdma_sendctxs_destroy(buf);
918 return -ENOMEM;
919}
920
921/* The sendctx queue is not guaranteed to have a size that is a
922 * power of two, thus the helpers in circ_buf.h cannot be used.
923 * The other option is to use modulus (%), which can be expensive.
924 */
925static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
926 unsigned long item)
927{
928 return likely(item < buf->rb_sc_last) ? item + 1 : 0;
929}
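
/* For example, with rb_sc_last == 3, successive calls starting from
 * index 0 return 1, 2, 3, 0, 1, and so on; the conditional above
 * replaces the modulus a non-power-of-two ring size would otherwise
 * require.
 */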

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_xprt *r_xprt;
        struct rpcrdma_sendctx *sc;
        unsigned long next_head;

        next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

        if (next_head == READ_ONCE(buf->rb_sc_tail))
                goto out_emptyq;

        /* ORDER: item must be accessed _before_ head is updated */
        sc = buf->rb_sc_ctxs[next_head];

        /* Releasing the lock in the caller acts as a memory
         * barrier that flushes rb_sc_head.
         */
        buf->rb_sc_head = next_head;

        return sc;

out_emptyq:
        /* The queue is "empty" if there have not been enough Send
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
        set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
        r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
        unsigned long next_tail;

        /* Unmap SGEs of previously completed but unsignaled
         * Sends by walking up the queue until @sc is found.
993 */
994 next_tail = buf->rb_sc_tail;
995 do {
996 next_tail = rpcrdma_sendctx_next(buf, next_tail);
997
998 /* ORDER: item must be accessed _before_ tail is updated */
999 rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);
1000
1001 } while (buf->rb_sc_ctxs[next_tail] != sc);
1002
1003 /* Paired with READ_ONCE */
1004 smp_store_release(&buf->rb_sc_tail, next_tail);
Chuck Lever2fad6592018-05-04 15:35:57 -04001005
1006 if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
1007 smp_mb__after_atomic();
1008 xprt_write_space(&sc->sc_xprt->rx_xprt);
1009 }
Chuck Leverae729502017-10-20 10:48:12 -04001010}
1011
Chuck Lever505bbe62016-06-29 13:52:54 -04001012static void
Chuck Lever96cedde2017-12-14 20:57:55 -05001013rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
Chuck Levere2ac2362016-06-29 13:54:00 -04001014{
1015 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1016 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
1017 unsigned int count;
1018 LIST_HEAD(free);
1019 LIST_HEAD(all);
1020
Chuck Leverc421ece2018-10-01 14:25:20 -04001021 for (count = 0; count < ia->ri_max_segs; count++) {
Chuck Lever96cedde2017-12-14 20:57:55 -05001022 struct rpcrdma_mr *mr;
Chuck Levere2ac2362016-06-29 13:54:00 -04001023 int rc;
1024
Chuck Lever96cedde2017-12-14 20:57:55 -05001025 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1026 if (!mr)
Chuck Levere2ac2362016-06-29 13:54:00 -04001027 break;
1028
Chuck Lever96cedde2017-12-14 20:57:55 -05001029 rc = ia->ri_ops->ro_init_mr(ia, mr);
Chuck Levere2ac2362016-06-29 13:54:00 -04001030 if (rc) {
Chuck Lever96cedde2017-12-14 20:57:55 -05001031 kfree(mr);
Chuck Levere2ac2362016-06-29 13:54:00 -04001032 break;
1033 }
1034
Chuck Lever96cedde2017-12-14 20:57:55 -05001035 mr->mr_xprt = r_xprt;
Chuck Levere2ac2362016-06-29 13:54:00 -04001036
Chuck Lever96cedde2017-12-14 20:57:55 -05001037 list_add(&mr->mr_list, &free);
1038 list_add(&mr->mr_all, &all);
Chuck Levere2ac2362016-06-29 13:54:00 -04001039 }
1040
Chuck Lever96cedde2017-12-14 20:57:55 -05001041 spin_lock(&buf->rb_mrlock);
1042 list_splice(&free, &buf->rb_mrs);
Chuck Levere2ac2362016-06-29 13:54:00 -04001043 list_splice(&all, &buf->rb_all);
1044 r_xprt->rx_stats.mrs_allocated += count;
Chuck Lever96cedde2017-12-14 20:57:55 -05001045 spin_unlock(&buf->rb_mrlock);
Chuck Lever1c443eff2017-12-20 16:31:21 -05001046 trace_xprtrdma_createmrs(r_xprt, count);
Chuck Lever9e679d52018-02-28 15:30:44 -05001047
1048 xprt_write_space(&r_xprt->rx_xprt);
Chuck Levere2ac2362016-06-29 13:54:00 -04001049}
1050
1051static void
1052rpcrdma_mr_refresh_worker(struct work_struct *work)
1053{
1054 struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
1055 rb_refresh_worker.work);
1056 struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
1057 rx_buf);
1058
Chuck Lever96cedde2017-12-14 20:57:55 -05001059 rpcrdma_mrs_create(r_xprt);
Chuck Levere2ac2362016-06-29 13:54:00 -04001060}
1061
Chuck Leverf531a5d2015-10-24 17:27:43 -04001062struct rpcrdma_req *
Chuck Lever13924022015-01-21 11:03:52 -05001063rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
1064{
Chuck Leverf531a5d2015-10-24 17:27:43 -04001065 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
Chuck Lever2dd4a012018-02-28 15:31:05 -05001066 struct rpcrdma_regbuf *rb;
Chuck Lever13924022015-01-21 11:03:52 -05001067 struct rpcrdma_req *req;
Chuck Lever13924022015-01-21 11:03:52 -05001068
Chuck Lever85275c82015-01-21 11:04:16 -05001069 req = kzalloc(sizeof(*req), GFP_KERNEL);
Chuck Lever13924022015-01-21 11:03:52 -05001070 if (req == NULL)
Chuck Lever85275c82015-01-21 11:04:16 -05001071 return ERR_PTR(-ENOMEM);
Chuck Lever13924022015-01-21 11:03:52 -05001072
Chuck Lever2dd4a012018-02-28 15:31:05 -05001073 rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
1074 DMA_TO_DEVICE, GFP_KERNEL);
1075 if (IS_ERR(rb)) {
1076 kfree(req);
1077 return ERR_PTR(-ENOMEM);
1078 }
1079 req->rl_rdmabuf = rb;
1080 xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
1081 req->rl_buffer = buffer;
1082 INIT_LIST_HEAD(&req->rl_registered);
1083
Chuck Leverf531a5d2015-10-24 17:27:43 -04001084 spin_lock(&buffer->rb_reqslock);
1085 list_add(&req->rl_all, &buffer->rb_allreqs);
1086 spin_unlock(&buffer->rb_reqslock);
Chuck Lever13924022015-01-21 11:03:52 -05001087 return req;
Chuck Lever13924022015-01-21 11:03:52 -05001088}
1089
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001090static int
1091rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
Chuck Lever13924022015-01-21 11:03:52 -05001092{
1093 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
Chuck Leverd698c4a2017-12-14 20:56:09 -05001094 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
Chuck Lever13924022015-01-21 11:03:52 -05001095 struct rpcrdma_rep *rep;
1096 int rc;
1097
1098 rc = -ENOMEM;
Chuck Lever6b1184c2015-01-21 11:04:25 -05001099 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
Chuck Lever13924022015-01-21 11:03:52 -05001100 if (rep == NULL)
1101 goto out;
Chuck Lever13924022015-01-21 11:03:52 -05001102
Chuck Lever13650c22016-09-15 10:56:26 -04001103 rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
Chuck Lever99ef4db2016-09-15 10:56:10 -04001104 DMA_FROM_DEVICE, GFP_KERNEL);
Chuck Lever6b1184c2015-01-21 11:04:25 -05001105 if (IS_ERR(rep->rr_rdmabuf)) {
1106 rc = PTR_ERR(rep->rr_rdmabuf);
Chuck Lever13924022015-01-21 11:03:52 -05001107 goto out_free;
Chuck Lever6b1184c2015-01-21 11:04:25 -05001108 }
Chuck Lever96f87782017-08-03 14:30:03 -04001109 xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
1110 rdmab_length(rep->rr_rdmabuf));
Chuck Lever13924022015-01-21 11:03:52 -05001111
Chuck Lever1519e962016-09-15 10:57:49 -04001112 rep->rr_cqe.done = rpcrdma_wc_receive;
Chuck Leverfed171b2015-05-26 11:51:37 -04001113 rep->rr_rxprt = r_xprt;
Chuck Leverd8f532d2017-10-16 15:01:30 -04001114 INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
Chuck Lever6ea8e712016-09-15 10:56:51 -04001115 rep->rr_recv_wr.next = NULL;
1116 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
1117 rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
1118 rep->rr_recv_wr.num_sge = 1;
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001119 rep->rr_temp = temp;
Chuck Leverd698c4a2017-12-14 20:56:09 -05001120
1121 spin_lock(&buf->rb_lock);
1122 list_add(&rep->rr_list, &buf->rb_recv_bufs);
1123 spin_unlock(&buf->rb_lock);
1124 return 0;
Chuck Lever13924022015-01-21 11:03:52 -05001125
1126out_free:
1127 kfree(rep);
1128out:
Chuck Leverd698c4a2017-12-14 20:56:09 -05001129 dprintk("RPC: %s: reply buffer %d alloc failed\n",
1130 __func__, rc);
1131 return rc;
Chuck Lever13924022015-01-21 11:03:52 -05001132}
1133
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001134int
Chuck Leverac920d02015-01-21 11:03:44 -05001135rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001136{
Chuck Leverac920d02015-01-21 11:03:44 -05001137 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001138 int i, rc;
1139
Chuck Lever1e465fd2015-10-24 17:27:02 -04001140 buf->rb_max_requests = r_xprt->rx_data.max_requests;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001141 buf->rb_bc_srv_max_requests = 0;
Chuck Lever96cedde2017-12-14 20:57:55 -05001142 spin_lock_init(&buf->rb_mrlock);
Chuck Lever505bbe62016-06-29 13:52:54 -04001143 spin_lock_init(&buf->rb_lock);
Chuck Lever96cedde2017-12-14 20:57:55 -05001144 INIT_LIST_HEAD(&buf->rb_mrs);
Chuck Levere2ac2362016-06-29 13:54:00 -04001145 INIT_LIST_HEAD(&buf->rb_all);
Chuck Levere2ac2362016-06-29 13:54:00 -04001146 INIT_DELAYED_WORK(&buf->rb_refresh_worker,
1147 rpcrdma_mr_refresh_worker);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001148
Chuck Lever96cedde2017-12-14 20:57:55 -05001149 rpcrdma_mrs_create(r_xprt);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001150
Chuck Lever1e465fd2015-10-24 17:27:02 -04001151 INIT_LIST_HEAD(&buf->rb_send_bufs);
Chuck Leverf531a5d2015-10-24 17:27:43 -04001152 INIT_LIST_HEAD(&buf->rb_allreqs);
1153 spin_lock_init(&buf->rb_reqslock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001154 for (i = 0; i < buf->rb_max_requests; i++) {
1155 struct rpcrdma_req *req;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001156
Chuck Lever13924022015-01-21 11:03:52 -05001157 req = rpcrdma_create_req(r_xprt);
1158 if (IS_ERR(req)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001159 dprintk("RPC: %s: request buffer %d alloc"
1160 " failed\n", __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001161 rc = PTR_ERR(req);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001162 goto out;
1163 }
Chuck Levera80d66c2017-06-08 11:52:12 -04001164 list_add(&req->rl_list, &buf->rb_send_bufs);
Chuck Lever1e465fd2015-10-24 17:27:02 -04001165 }
1166
Chuck Lever8d4fb8f2018-07-28 10:46:47 -04001167 buf->rb_credits = 1;
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001168 buf->rb_posted_receives = 0;
Chuck Lever1e465fd2015-10-24 17:27:02 -04001169 INIT_LIST_HEAD(&buf->rb_recv_bufs);
Chuck Lever13924022015-01-21 11:03:52 -05001170
Chuck Leverae729502017-10-20 10:48:12 -04001171 rc = rpcrdma_sendctxs_create(r_xprt);
1172 if (rc)
1173 goto out;
1174
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001175 return 0;
1176out:
1177 rpcrdma_buffer_destroy(buf);
1178 return rc;
1179}
1180
Chuck Lever2e845222014-07-29 17:25:38 -04001181static void
Chuck Lever13650c22016-09-15 10:56:26 -04001182rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
Chuck Lever13924022015-01-21 11:03:52 -05001183{
Chuck Lever13650c22016-09-15 10:56:26 -04001184 rpcrdma_free_regbuf(rep->rr_rdmabuf);
Chuck Lever13924022015-01-21 11:03:52 -05001185 kfree(rep);
1186}
1187
Chuck Leverf531a5d2015-10-24 17:27:43 -04001188void
Chuck Lever13650c22016-09-15 10:56:26 -04001189rpcrdma_destroy_req(struct rpcrdma_req *req)
Chuck Lever13924022015-01-21 11:03:52 -05001190{
Chuck Lever13650c22016-09-15 10:56:26 -04001191 rpcrdma_free_regbuf(req->rl_recvbuf);
1192 rpcrdma_free_regbuf(req->rl_sendbuf);
1193 rpcrdma_free_regbuf(req->rl_rdmabuf);
Chuck Lever13924022015-01-21 11:03:52 -05001194 kfree(req);
1195}
1196
Chuck Levere2ac2362016-06-29 13:54:00 -04001197static void
Chuck Lever96cedde2017-12-14 20:57:55 -05001198rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
Chuck Levere2ac2362016-06-29 13:54:00 -04001199{
1200 struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
1201 rx_buf);
1202 struct rpcrdma_ia *ia = rdmab_to_ia(buf);
Chuck Lever96cedde2017-12-14 20:57:55 -05001203 struct rpcrdma_mr *mr;
Chuck Levere2ac2362016-06-29 13:54:00 -04001204 unsigned int count;
1205
1206 count = 0;
Chuck Lever96cedde2017-12-14 20:57:55 -05001207 spin_lock(&buf->rb_mrlock);
Chuck Levere2ac2362016-06-29 13:54:00 -04001208 while (!list_empty(&buf->rb_all)) {
Chuck Lever96cedde2017-12-14 20:57:55 -05001209 mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
1210 list_del(&mr->mr_all);
Chuck Levere2ac2362016-06-29 13:54:00 -04001211
Chuck Lever96cedde2017-12-14 20:57:55 -05001212 spin_unlock(&buf->rb_mrlock);
Chuck Lever054f1552018-05-01 11:37:14 -04001213
1214 /* Ensure MW is not on any rl_registered list */
1215 if (!list_empty(&mr->mr_list))
1216 list_del(&mr->mr_list);
1217
Chuck Lever96cedde2017-12-14 20:57:55 -05001218 ia->ri_ops->ro_release_mr(mr);
Chuck Levere2ac2362016-06-29 13:54:00 -04001219 count++;
Chuck Lever96cedde2017-12-14 20:57:55 -05001220 spin_lock(&buf->rb_mrlock);
Chuck Levere2ac2362016-06-29 13:54:00 -04001221 }
Chuck Lever96cedde2017-12-14 20:57:55 -05001222 spin_unlock(&buf->rb_mrlock);
Chuck Levere2ac2362016-06-29 13:54:00 -04001223 r_xprt->rx_stats.mrs_allocated = 0;
1224
1225 dprintk("RPC: %s: released %u MRs\n", __func__, count);
1226}
1227
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001228void
1229rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
1230{
Chuck Lever9378b272017-04-11 13:22:29 -04001231 cancel_delayed_work_sync(&buf->rb_refresh_worker);
Chuck Lever505bbe62016-06-29 13:52:54 -04001232
Chuck Leverae729502017-10-20 10:48:12 -04001233 rpcrdma_sendctxs_destroy(buf);
1234
Chuck Lever1e465fd2015-10-24 17:27:02 -04001235 while (!list_empty(&buf->rb_recv_bufs)) {
1236 struct rpcrdma_rep *rep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001237
Chuck Lever9d95cd52018-05-04 15:35:36 -04001238 rep = list_first_entry(&buf->rb_recv_bufs,
1239 struct rpcrdma_rep, rr_list);
1240 list_del(&rep->rr_list);
Chuck Lever13650c22016-09-15 10:56:26 -04001241 rpcrdma_destroy_rep(rep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001242 }
1243
Chuck Leverf531a5d2015-10-24 17:27:43 -04001244 spin_lock(&buf->rb_reqslock);
1245 while (!list_empty(&buf->rb_allreqs)) {
Chuck Lever1e465fd2015-10-24 17:27:02 -04001246 struct rpcrdma_req *req;
Allen Andrews4034ba02014-05-28 10:32:09 -04001247
Chuck Leverf531a5d2015-10-24 17:27:43 -04001248 req = list_first_entry(&buf->rb_allreqs,
1249 struct rpcrdma_req, rl_all);
1250 list_del(&req->rl_all);
1251
1252 spin_unlock(&buf->rb_reqslock);
Chuck Lever13650c22016-09-15 10:56:26 -04001253 rpcrdma_destroy_req(req);
Chuck Leverf531a5d2015-10-24 17:27:43 -04001254 spin_lock(&buf->rb_reqslock);
Chuck Lever9f9d8022014-07-29 17:24:45 -04001255 }
Chuck Leverf531a5d2015-10-24 17:27:43 -04001256 spin_unlock(&buf->rb_reqslock);
Chuck Lever9f9d8022014-07-29 17:24:45 -04001257
Chuck Lever96cedde2017-12-14 20:57:55 -05001258 rpcrdma_mrs_destroy(buf);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001259}
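
/*
 * A hedged, illustrative sketch of the teardown ordering an owner of
 * these structures might use (the real caller lives in transport.c and
 * is assumed here, not shown): quiesce the endpoint first so that no
 * Send or Receive completion can touch the reqs, reps, and MRs that
 * rpcrdma_buffer_destroy() is about to free.
 *
 *	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
 *	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
 *	rpcrdma_ia_close(&r_xprt->rx_ia);
 */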
1260
Chuck Lever96cedde2017-12-14 20:57:55 -05001261/**
1262 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
1263 * @r_xprt: controlling transport
1264 *
1265 * Returns an initialized rpcrdma_mr or NULL if no free
1266 * rpcrdma_mr objects are available.
1267 */
1268struct rpcrdma_mr *
1269rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
Chuck Leverc2922c02014-07-29 17:24:36 -04001270{
Chuck Lever346aa662015-05-26 11:52:06 -04001271 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
Chuck Lever96cedde2017-12-14 20:57:55 -05001272 struct rpcrdma_mr *mr = NULL;
Chuck Lever346aa662015-05-26 11:52:06 -04001273
Chuck Lever96cedde2017-12-14 20:57:55 -05001274 spin_lock(&buf->rb_mrlock);
1275 if (!list_empty(&buf->rb_mrs))
1276 mr = rpcrdma_mr_pop(&buf->rb_mrs);
1277 spin_unlock(&buf->rb_mrlock);
Chuck Lever346aa662015-05-26 11:52:06 -04001278
Chuck Lever96cedde2017-12-14 20:57:55 -05001279 if (!mr)
1280 goto out_nomrs;
1281 return mr;
Chuck Levere2ac2362016-06-29 13:54:00 -04001282
Chuck Lever96cedde2017-12-14 20:57:55 -05001283out_nomrs:
Chuck Lever1c443eff2017-12-20 16:31:21 -05001284 trace_xprtrdma_nomrs(r_xprt);
Chuck Leverbebd0312017-04-11 13:23:10 -04001285 if (r_xprt->rx_ep.rep_connected != -ENODEV)
1286 schedule_delayed_work(&buf->rb_refresh_worker, 0);
Chuck Levere2ac2362016-06-29 13:54:00 -04001287
1288 /* Allow the reply handler and refresh worker to run */
1289 cond_resched();
1290
1291 return NULL;
Chuck Leverc2922c02014-07-29 17:24:36 -04001292}
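
/*
 * A hedged, illustrative sketch of how a registration path pairs
 * rpcrdma_mr_get() with rpcrdma_mr_unmap_and_put(); the mapping and
 * registration steps in the middle are placeholders for the memreg
 * code (ro_map), not a copy of it:
 *
 *	struct rpcrdma_mr *mr;
 *
 *	mr = rpcrdma_mr_get(r_xprt);
 *	if (!mr)
 *		return ERR_PTR(-EAGAIN);  // hypothetical: caller retries later
 *	// ... DMA-map mr->mr_sg (mr->mr_nents, mr->mr_dir) and post a
 *	// registration WR ...
 *
 *	// once the MR has been invalidated or is no longer needed:
 *	rpcrdma_mr_unmap_and_put(mr);
 */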
1293
Chuck Leverec12e472017-12-14 20:58:04 -05001294static void
1295__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
1296{
1297 spin_lock(&buf->rb_mrlock);
1298 rpcrdma_mr_push(mr, &buf->rb_mrs);
1299 spin_unlock(&buf->rb_mrlock);
1300}
1301
Chuck Lever96cedde2017-12-14 20:57:55 -05001302/**
1303 * rpcrdma_mr_put - Release an rpcrdma_mr object
1304 * @mr: object to release
1305 *
1306 */
Chuck Lever346aa662015-05-26 11:52:06 -04001307void
Chuck Lever96cedde2017-12-14 20:57:55 -05001308rpcrdma_mr_put(struct rpcrdma_mr *mr)
Chuck Leverc2922c02014-07-29 17:24:36 -04001309{
Chuck Leverec12e472017-12-14 20:58:04 -05001310 __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
1311}
Chuck Leverc2922c02014-07-29 17:24:36 -04001312
Chuck Leverec12e472017-12-14 20:58:04 -05001313/**
1314 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
1315 * @mr: object to release
1316 *
1317 */
1318void
1319rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
1320{
1321 struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
1322
Chuck Leverd379eaa2018-10-01 14:25:30 -04001323 trace_xprtrdma_mr_unmap(mr);
Chuck Leverec12e472017-12-14 20:58:04 -05001324 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
1325 mr->mr_sg, mr->mr_nents, mr->mr_dir);
1326 __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
Chuck Leverc2922c02014-07-29 17:24:36 -04001327}
1328
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001329/**
1330 * rpcrdma_buffer_get - Get a request buffer
1331 * @buffers: Buffer pool from which to obtain a buffer
Chuck Lever78d506e2016-09-06 11:22:49 -04001332 *
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001333 * Returns a fresh rpcrdma_req, or NULL if none are available.
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001334 */
1335struct rpcrdma_req *
1336rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
1337{
1338 struct rpcrdma_req *req;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001339
Chuck Levera5b027e2015-10-24 17:27:27 -04001340 spin_lock(&buffers->rb_lock);
Chuck Levere68699c2018-05-04 15:35:31 -04001341 req = list_first_entry_or_null(&buffers->rb_send_bufs,
1342 struct rpcrdma_req, rl_list);
1343 if (req)
1344 list_del_init(&req->rl_list);
Chuck Levera5b027e2015-10-24 17:27:27 -04001345 spin_unlock(&buffers->rb_lock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001346 return req;
1347}
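
/*
 * A hedged, illustrative sketch of the get/put pairing for request
 * buffers; the marshaling and posting in the middle are elided:
 *
 *	struct rpcrdma_req *req;
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 *	if (!req)
 *		return -EAGAIN;		// hypothetical caller policy
 *	// ... marshal and post the RPC Call using this req ...
 *	rpcrdma_buffer_put(req);	// also recycles req->rl_reply, if any
 */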
1348
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001349/**
1350 * rpcrdma_buffer_put - Put request/reply buffers back into pool
1351 * @req: object to return
1352 *
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001353 */
1354void
1355rpcrdma_buffer_put(struct rpcrdma_req *req)
1356{
1357 struct rpcrdma_buffer *buffers = req->rl_buffer;
Chuck Lever1e465fd2015-10-24 17:27:02 -04001358 struct rpcrdma_rep *rep = req->rl_reply;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001359
Chuck Lever1e465fd2015-10-24 17:27:02 -04001360 req->rl_reply = NULL;
1361
Chuck Levera5b027e2015-10-24 17:27:27 -04001362 spin_lock(&buffers->rb_lock);
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001363 list_add(&req->rl_list, &buffers->rb_send_bufs);
Chuck Lever05c97462016-09-06 11:22:58 -04001364 if (rep) {
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001365 if (!rep->rr_temp) {
1366 list_add(&rep->rr_list, &buffers->rb_recv_bufs);
1367 rep = NULL;
1368 }
Chuck Lever05c97462016-09-06 11:22:58 -04001369 }
Chuck Levera5b027e2015-10-24 17:27:27 -04001370 spin_unlock(&buffers->rb_lock);
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001371 if (rep)
1372 rpcrdma_destroy_rep(rep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001373}
1374
1375/*
1376 * Put reply buffers back into pool when not attached to
Chuck Leverb45ccfd2014-05-28 10:32:34 -04001377 * a request. This happens in error conditions.
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001378 */
1379void
1380rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
1381{
Chuck Leverfed171b2015-05-26 11:51:37 -04001382 struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001383
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001384 if (!rep->rr_temp) {
1385 spin_lock(&buffers->rb_lock);
1386 list_add(&rep->rr_list, &buffers->rb_recv_bufs);
1387 spin_unlock(&buffers->rb_lock);
1388 } else {
1389 rpcrdma_destroy_rep(rep);
1390 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001391}
1392
Chuck Lever9128c3e2015-01-21 11:04:00 -05001393/**
Chuck Lever99ef4db2016-09-15 10:56:10 -04001394 * rpcrdma_alloc_regbuf - allocate DMA-mappable memory for SEND/RECV buffers
Chuck Lever9128c3e2015-01-21 11:04:00 -05001395 * @size: size of buffer to be allocated, in bytes
Chuck Lever99ef4db2016-09-15 10:56:10 -04001396 * @direction: direction of data movement
Chuck Lever9128c3e2015-01-21 11:04:00 -05001397 * @flags: GFP flags
1398 *
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001399 * Returns a pointer to a regbuf, a buffer that can be persistently
1400 * DMA-mapped for I/O, or an ERR_PTR on failure.
Chuck Lever9128c3e2015-01-21 11:04:00 -05001401 *
1402 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
Chuck Lever99ef4db2016-09-15 10:56:10 -04001403 * receiving the payload of RDMA RECV operations. During Long Calls
1404 * or Replies they may be registered externally via ro_map.
Chuck Lever9128c3e2015-01-21 11:04:00 -05001405 */
1406struct rpcrdma_regbuf *
Chuck Lever13650c22016-09-15 10:56:26 -04001407rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
1408 gfp_t flags)
Chuck Lever9128c3e2015-01-21 11:04:00 -05001409{
1410 struct rpcrdma_regbuf *rb;
Chuck Lever9128c3e2015-01-21 11:04:00 -05001411
Chuck Lever9128c3e2015-01-21 11:04:00 -05001412 rb = kmalloc(sizeof(*rb) + size, flags);
1413 if (rb == NULL)
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001414 return ERR_PTR(-ENOMEM);
Chuck Lever9128c3e2015-01-21 11:04:00 -05001415
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001416 rb->rg_device = NULL;
Chuck Lever99ef4db2016-09-15 10:56:10 -04001417 rb->rg_direction = direction;
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001418 rb->rg_iov.length = size;
Chuck Lever9128c3e2015-01-21 11:04:00 -05001419
1420 return rb;
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001421}
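
/*
 * A hedged, illustrative sketch of a regbuf's lifecycle; the size,
 * direction, and GFP flags below are arbitrary example values:
 *
 *	struct rpcrdma_regbuf *rb;
 *
 *	rb = rpcrdma_alloc_regbuf(PAGE_SIZE, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	// Not yet DMA-mapped: callers map lazily, just before first use,
 *	// via __rpcrdma_dma_map_regbuf() (defined below).
 *	...
 *	rpcrdma_free_regbuf(rb);	// unmaps the buffer (if mapped) and frees it
 */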
Chuck Lever9128c3e2015-01-21 11:04:00 -05001422
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001423/**
1424 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
1425 * @ia: controlling rpcrdma_ia
1426 * @rb: regbuf to be mapped
1427 */
1428bool
1429__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
1430{
Chuck Lever91a10c52017-04-11 13:23:02 -04001431 struct ib_device *device = ia->ri_device;
1432
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001433 if (rb->rg_direction == DMA_NONE)
1434 return false;
1435
Chuck Lever91a10c52017-04-11 13:23:02 -04001436 rb->rg_iov.addr = ib_dma_map_single(device,
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001437 (void *)rb->rg_base,
1438 rdmab_length(rb),
1439 rb->rg_direction);
Chuck Lever91a10c52017-04-11 13:23:02 -04001440 if (ib_dma_mapping_error(device, rdmab_addr(rb)))
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001441 return false;
1442
Chuck Lever91a10c52017-04-11 13:23:02 -04001443 rb->rg_device = device;
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001444 rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
1445 return true;
1446}
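
/*
 * A hedged, illustrative sketch of the map-on-first-use idiom callers
 * follow (rpcrdma_post_recvs() below uses the same pattern):
 *
 *	if (!rpcrdma_regbuf_is_mapped(rb) &&
 *	    !__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb))
 *		goto out_map_err;	// hypothetical error label
 *	// rdmab_addr(rb) and rdmab_length(rb) can now be used to build WRs
 */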
1447
1448static void
1449rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
1450{
Chuck Levere89e8d8f2018-01-31 12:34:13 -05001451 if (!rb)
1452 return;
1453
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001454 if (!rpcrdma_regbuf_is_mapped(rb))
1455 return;
1456
1457 ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
1458 rdmab_length(rb), rb->rg_direction);
1459 rb->rg_device = NULL;
Chuck Lever9128c3e2015-01-21 11:04:00 -05001460}
1461
1462/**
1463 * rpcrdma_free_regbuf - deregister and free registered buffer
Chuck Lever9128c3e2015-01-21 11:04:00 -05001464 * @rb: regbuf to be deregistered and freed
1465 */
1466void
Chuck Lever13650c22016-09-15 10:56:26 -04001467rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
Chuck Lever9128c3e2015-01-21 11:04:00 -05001468{
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001469 rpcrdma_dma_unmap_regbuf(rb);
Chuck Levere531dca2015-08-03 13:03:20 -04001470 kfree(rb);
Chuck Lever9128c3e2015-01-21 11:04:00 -05001471}
1472
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001473/*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001474 * Post the Send WR for this request. Only every rep_send_batch-th
1475 * Send is signaled, or a Send that must release TX resources.
1476 * Receive buffers are posted separately, by rpcrdma_post_recvs().
1477 */
1478int
1479rpcrdma_ep_post(struct rpcrdma_ia *ia,
1480 struct rpcrdma_ep *ep,
1481 struct rpcrdma_req *req)
1482{
Chuck Leverae729502017-10-20 10:48:12 -04001483 struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
Chuck Lever655fec62016-09-15 10:57:24 -04001484 int rc;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001485
Chuck Lever01bb35c2017-10-20 10:48:36 -04001486 if (!ep->rep_send_count ||
1487 test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
Chuck Leverae729502017-10-20 10:48:12 -04001488 send_wr->send_flags |= IB_SEND_SIGNALED;
1489 ep->rep_send_count = ep->rep_send_batch;
1490 } else {
1491 send_wr->send_flags &= ~IB_SEND_SIGNALED;
1492 --ep->rep_send_count;
1493 }
Chuck Lever7a89f9c2016-06-29 13:53:43 -04001494
Chuck Leverf2877622018-02-28 15:30:59 -05001495 rc = ia->ri_ops->ro_send(ia, req);
Chuck Leverab03eff2017-12-20 16:30:40 -05001496 trace_xprtrdma_post_send(req, rc);
1497 if (rc)
1498 return -ENOTCONN;
1499 return 0;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001500}
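
/*
 * A hedged, illustrative sketch of a send path invoking rpcrdma_ep_post();
 * the label and surrounding error recovery are hypothetical:
 *
 *	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
 *		goto drop_connection;	// -ENOTCONN: force a reconnect
 */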
1501
Chuck Leverf531a5d2015-10-24 17:27:43 -04001502/**
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001503 * rpcrdma_post_recvs - Maybe post some Receive buffers
1504 * @r_xprt: controlling transport
1505 * @temp: when true, allocate temp rpcrdma_rep objects
Chuck Leverf531a5d2015-10-24 17:27:43 -04001506 *
Chuck Leverf531a5d2015-10-24 17:27:43 -04001507 */
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001508void
1509rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
Chuck Leverf531a5d2015-10-24 17:27:43 -04001510{
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001511 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1512 struct ib_recv_wr *wr, *bad_wr;
1513 int needed, count, rc;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001514
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001515 needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
1516 if (buf->rb_posted_receives > needed)
1517 return;
1518 needed -= buf->rb_posted_receives;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001519
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001520 count = 0;
1521 wr = NULL;
1522 while (needed) {
1523 struct rpcrdma_regbuf *rb;
1524 struct rpcrdma_rep *rep;
1525
1526 spin_lock(&buf->rb_lock);
1527 rep = list_first_entry_or_null(&buf->rb_recv_bufs,
1528 struct rpcrdma_rep, rr_list);
1529 if (likely(rep))
1530 list_del(&rep->rr_list);
1531 spin_unlock(&buf->rb_lock);
1532 if (!rep) {
1533 if (rpcrdma_create_rep(r_xprt, temp))
1534 break;
1535 continue;
1536 }
1537
1538 rb = rep->rr_rdmabuf;
1539 if (!rpcrdma_regbuf_is_mapped(rb)) {
1540 if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) {
1541 rpcrdma_recv_buffer_put(rep);
1542 break;
1543 }
1544 }
1545
1546 trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
1547 rep->rr_recv_wr.next = wr;
1548 wr = &rep->rr_recv_wr;
1549 ++count;
1550 --needed;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001551 }
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001552 if (!count)
1553 return;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001554
Bart Van Assched34ac5c2018-07-18 09:25:32 -07001555 rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
1556 (const struct ib_recv_wr **)&bad_wr);
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001557 if (rc) {
1558 for (wr = bad_wr; wr; wr = wr->next) {
1559 struct rpcrdma_rep *rep;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001560
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001561 rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
1562 rpcrdma_recv_buffer_put(rep);
1563 --count;
1564 }
1565 }
1566 buf->rb_posted_receives += count;
1567 trace_xprtrdma_post_recvs(r_xprt, count, rc);
Chuck Leverf531a5d2015-10-24 17:27:43 -04001568}
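
/*
 * A hedged, illustrative sketch of where Receives get replenished,
 * assuming the usual call sites for this function: the connect path
 * posts an initial batch of temporary reps, and the Reply path tops
 * the queue back up as Receives complete:
 *
 *	rpcrdma_post_recvs(r_xprt, true);	// while (re)connecting
 *	...
 *	rpcrdma_post_recvs(r_xprt, false);	// from the Reply handling path
 */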