// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400111static void
112rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
113{
114 struct rpcrdma_ep *ep = context;
Chuck Lever643cf322017-12-20 16:31:45 -0500115 struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
116 rx_ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400117
Chuck Lever643cf322017-12-20 16:31:45 -0500118 trace_xprtrdma_qp_error(r_xprt, event);
Chuck Lever2f6922c2016-11-29 10:53:21 -0500119 pr_err("rpcrdma: %s on device %s ep %p\n",
120 ib_event_msg(event->event), event->device->name, context);
121
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400122 if (ep->rep_connected == 1) {
123 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500124 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400125 wake_up_all(&ep->rep_connect_wait);
126 }
127}
128
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC: %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		++xprt->connect_cookie;
		ep->rep_connected = -ECONNABORTED;
connected:
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: %s:%s on %s/%s: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_device->name, ia->ri_ops->ro_displayname,
		rdma_event_msg(event->event));
	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers are needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC: %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400731/*
732 * Connect unconnected endpoint.
733 */
734int
735rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
736{
Chuck Lever0a904872017-02-08 17:00:35 -0500737 struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
738 rx_ia);
Chuck Lever18908962017-04-11 13:23:18 -0400739 int rc;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400740
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400741retry:
Chuck Lever18908962017-04-11 13:23:18 -0400742 switch (ep->rep_connected) {
743 case 0:
Chuck Leverec62f402014-05-28 10:34:07 -0400744 dprintk("RPC: %s: connecting...\n", __func__);
745 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
746 if (rc) {
747 dprintk("RPC: %s: rdma_create_qp failed %i\n",
748 __func__, rc);
Chuck Lever18908962017-04-11 13:23:18 -0400749 rc = -ENETUNREACH;
750 goto out_noupdate;
Chuck Leverec62f402014-05-28 10:34:07 -0400751 }
Chuck Lever18908962017-04-11 13:23:18 -0400752 break;
Chuck Levera9b0e382017-04-11 13:23:26 -0400753 case -ENODEV:
754 rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
755 if (rc)
756 goto out_noupdate;
757 break;
Chuck Lever18908962017-04-11 13:23:18 -0400758 default:
759 rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
760 if (rc)
761 goto out;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400762 }
763
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400764 ep->rep_connected = 0;
Chuck Lever8d4fb8f2018-07-28 10:46:47 -0400765 rpcrdma_post_recvs(r_xprt, true);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400766
767 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
768 if (rc) {
769 dprintk("RPC: %s: rdma_connect() failed with %i\n",
770 __func__, rc);
771 goto out;
772 }
773
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400774 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400775 if (ep->rep_connected <= 0) {
Chuck Lever0a904872017-02-08 17:00:35 -0500776 if (ep->rep_connected == -EAGAIN)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400777 goto retry;
778 rc = ep->rep_connected;
Chuck Lever0a904872017-02-08 17:00:35 -0500779 goto out;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400780 }
781
Chuck Lever0a904872017-02-08 17:00:35 -0500782 dprintk("RPC: %s: connected\n", __func__);
Chuck Lever7c8d9e72018-05-04 15:35:20 -0400783
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400784out:
785 if (rc)
786 ep->rep_connected = rc;
Chuck Lever18908962017-04-11 13:23:18 -0400787
788out_noupdate:
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400789 return rc;
790}
791
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
					       rx_ep), rc);

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
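/* Capacity note: the ring has rb_sc_last + 1 slots, and the consumer
 * stops allocating when advancing rb_sc_head would collide with
 * rb_sc_tail. At most rb_sc_last sendctxs can therefore be outstanding
 * at any time (for example, four slots allow three in-flight Sends).
 */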

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}
	buf->rb_flags = 0;

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
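
/* rpcrdma_sendctx_next() example: with rb_sc_last == 3 the index
 * sequence is 0, 1, 2, 3, 0, ... -- the same result as
 * (item + 1) % (rb_sc_last + 1), but without the division.
 */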

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
		smp_mb__after_atomic();
		xprt_write_space(&sc->sc_xprt->rx_xprt);
	}
}
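
/* Note: rb_sc_tail is advanced only by the Send completion path above,
 * and only after the sendctxs being released have been unmapped, so a
 * sendctx cannot be handed out again by rpcrdma_sendctx_get_locked()
 * until its SGEs have been unmapped.
 */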

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb)) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
	req->rl_buffer = buffer;
	INIT_LIST_HEAD(&req->rl_registered);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	return req;
}

static int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC: %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_credits = 1;
	buf->rb_posted_receives = 0;
	INIT_LIST_HEAD(&buf->rb_recv_bufs);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);

		/* Ensure MR is not on any rl_registered list */
		if (!list_empty(&mr->mr_list))
			list_del(&mr->mr_list);

		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC: %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = list_first_entry(&buf->rb_recv_bufs,
				       struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		rpcrdma_destroy_rep(rep);
	}

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_mrs_destroy(buf);
}
1236
Chuck Lever96cedde2017-12-14 20:57:55 -05001237/**
1238 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
1239 * @r_xprt: controlling transport
1240 *
1241 * Returns an initialized rpcrdma_mr or NULL if no free
1242 * rpcrdma_mr objects are available.
1243 */
1244struct rpcrdma_mr *
1245rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
Chuck Leverc2922c02014-07-29 17:24:36 -04001246{
Chuck Lever346aa662015-05-26 11:52:06 -04001247 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
Chuck Lever96cedde2017-12-14 20:57:55 -05001248 struct rpcrdma_mr *mr = NULL;
Chuck Lever346aa662015-05-26 11:52:06 -04001249
Chuck Lever96cedde2017-12-14 20:57:55 -05001250 spin_lock(&buf->rb_mrlock);
1251 if (!list_empty(&buf->rb_mrs))
1252 mr = rpcrdma_mr_pop(&buf->rb_mrs);
1253 spin_unlock(&buf->rb_mrlock);
Chuck Lever346aa662015-05-26 11:52:06 -04001254
Chuck Lever96cedde2017-12-14 20:57:55 -05001255 if (!mr)
1256 goto out_nomrs;
1257 return mr;
Chuck Levere2ac2362016-06-29 13:54:00 -04001258
Chuck Lever96cedde2017-12-14 20:57:55 -05001259out_nomrs:
Chuck Lever1c443eff2017-12-20 16:31:21 -05001260 trace_xprtrdma_nomrs(r_xprt);
Chuck Leverbebd0312017-04-11 13:23:10 -04001261 if (r_xprt->rx_ep.rep_connected != -ENODEV)
1262 schedule_delayed_work(&buf->rb_refresh_worker, 0);
Chuck Levere2ac2362016-06-29 13:54:00 -04001263
1264 /* Allow the reply handler and refresh worker to run */
1265 cond_resched();
1266
1267 return NULL;
Chuck Leverc2922c02014-07-29 17:24:36 -04001268}
1269
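/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): a registration path might reserve an MR as below, treating a
 * NULL return as a temporary shortage. example_reserve_mr() is a
 * hypothetical caller used only for illustration.
 */
static int example_reserve_mr(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_mr **out)
{
	struct rpcrdma_mr *mr;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return -EAGAIN;	/* rb_refresh_worker has been kicked */
	*out = mr;
	return 0;
}
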
Chuck Leverec12e472017-12-14 20:58:04 -05001270static void
1271__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
1272{
1273 spin_lock(&buf->rb_mrlock);
1274 rpcrdma_mr_push(mr, &buf->rb_mrs);
1275 spin_unlock(&buf->rb_mrlock);
1276}
1277
Chuck Lever96cedde2017-12-14 20:57:55 -05001278/**
1279 * rpcrdma_mr_put - Release an rpcrdma_mr object
1280 * @mr: object to release
1281 *
1282 */
Chuck Lever346aa662015-05-26 11:52:06 -04001283void
Chuck Lever96cedde2017-12-14 20:57:55 -05001284rpcrdma_mr_put(struct rpcrdma_mr *mr)
Chuck Leverc2922c02014-07-29 17:24:36 -04001285{
Chuck Leverec12e472017-12-14 20:58:04 -05001286 __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
1287}
Chuck Leverc2922c02014-07-29 17:24:36 -04001288
Chuck Leverec12e472017-12-14 20:58:04 -05001289/**
1290 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
1291 * @mr: object to release
1292 *
1293 */
1294void
1295rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
1296{
1297 struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
1298
Chuck Leverd379eaa2018-10-01 14:25:30 -04001299 trace_xprtrdma_mr_unmap(mr);
Chuck Leverec12e472017-12-14 20:58:04 -05001300 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
1301 mr->mr_sg, mr->mr_nents, mr->mr_dir);
1302 __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
Chuck Leverc2922c02014-07-29 17:24:36 -04001303}
1304
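/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): release paths pick one of the two helpers above. An MR whose
 * scatterlist is DMA-mapped goes through rpcrdma_mr_unmap_and_put(); one
 * that never reached the mapping step is returned directly with
 * rpcrdma_mr_put(). The "mapped" flag is hypothetical.
 */
static void example_release_mr(struct rpcrdma_mr *mr, bool mapped)
{
	if (mapped)
		rpcrdma_mr_unmap_and_put(mr);	/* DMA unmap, then free list */
	else
		rpcrdma_mr_put(mr);		/* free list only */
}
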
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001305/**
1306 * rpcrdma_buffer_get - Get a request buffer
1307 * @buffers: Buffer pool from which to obtain a buffer
Chuck Lever78d506e2016-09-06 11:22:49 -04001308 *
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001309 * Returns a fresh rpcrdma_req, or NULL if none are available.
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001310 */
1311struct rpcrdma_req *
1312rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
1313{
1314 struct rpcrdma_req *req;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001315
Chuck Levera5b027e2015-10-24 17:27:27 -04001316 spin_lock(&buffers->rb_lock);
Chuck Levere68699c2018-05-04 15:35:31 -04001317 req = list_first_entry_or_null(&buffers->rb_send_bufs,
1318 struct rpcrdma_req, rl_list);
1319 if (req)
1320 list_del_init(&req->rl_list);
Chuck Levera5b027e2015-10-24 17:27:27 -04001321 spin_unlock(&buffers->rb_lock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001322 return req;
1323}
1324
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001325/**
1326 * rpcrdma_buffer_put - Put request/reply buffers back into pool
1327 * @req: object to return
1328 *
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001329 */
1330void
1331rpcrdma_buffer_put(struct rpcrdma_req *req)
1332{
1333 struct rpcrdma_buffer *buffers = req->rl_buffer;
Chuck Lever1e465fd2015-10-24 17:27:02 -04001334 struct rpcrdma_rep *rep = req->rl_reply;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001335
Chuck Lever1e465fd2015-10-24 17:27:02 -04001336 req->rl_reply = NULL;
1337
Chuck Levera5b027e2015-10-24 17:27:27 -04001338 spin_lock(&buffers->rb_lock);
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001339 list_add(&req->rl_list, &buffers->rb_send_bufs);
Chuck Lever05c97462016-09-06 11:22:58 -04001340 if (rep) {
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001341 if (!rep->rr_temp) {
1342 list_add(&rep->rr_list, &buffers->rb_recv_bufs);
1343 rep = NULL;
1344 }
Chuck Lever05c97462016-09-06 11:22:58 -04001345 }
Chuck Levera5b027e2015-10-24 17:27:27 -04001346 spin_unlock(&buffers->rb_lock);
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001347 if (rep)
1348 rpcrdma_destroy_rep(rep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001349}
1350
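/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): callers pair rpcrdma_buffer_get() with rpcrdma_buffer_put().
 * example_with_req() is a hypothetical caller; the marshaling and posting
 * steps are elided.
 */
static int example_with_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (!req)
		return -ENOMEM;

	/* ... marshal and post the RPC Call here ... */

	rpcrdma_buffer_put(req);	/* also recycles req->rl_reply, if any */
	return 0;
}
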
1351/*
1352 * Put reply buffers back into pool when not attached to
Chuck Leverb45ccfd2014-05-28 10:32:34 -04001353 * request. This happens in error conditions.
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001354 */
1355void
1356rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
1357{
Chuck Leverfed171b2015-05-26 11:51:37 -04001358 struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001359
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001360 if (!rep->rr_temp) {
1361 spin_lock(&buffers->rb_lock);
1362 list_add(&rep->rr_list, &buffers->rb_recv_bufs);
1363 spin_unlock(&buffers->rb_lock);
1364 } else {
1365 rpcrdma_destroy_rep(rep);
1366 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001367}
1368
Chuck Lever9128c3e2015-01-21 11:04:00 -05001369/**
Chuck Lever99ef4db2016-09-15 10:56:10 -04001370 * rpcrdma_alloc_regbuf - allocate memory for SEND/RECV buffers
Chuck Lever9128c3e2015-01-21 11:04:00 -05001371 * @size: size of buffer to be allocated, in bytes
Chuck Lever99ef4db2016-09-15 10:56:10 -04001372 * @direction: direction of data movement
Chuck Lever9128c3e2015-01-21 11:04:00 -05001373 * @flags: GFP flags
1374 *
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001375 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
1376 * can be persistently DMA-mapped for I/O.
Chuck Lever9128c3e2015-01-21 11:04:00 -05001377 *
1378 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
Chuck Lever99ef4db2016-09-15 10:56:10 -04001379 * receiving the payload of RDMA RECV operations. During Long Calls
1380 * or Replies they may be registered externally via ro_map.
Chuck Lever9128c3e2015-01-21 11:04:00 -05001381 */
1382struct rpcrdma_regbuf *
Chuck Lever13650c22016-09-15 10:56:26 -04001383rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
1384 gfp_t flags)
Chuck Lever9128c3e2015-01-21 11:04:00 -05001385{
1386 struct rpcrdma_regbuf *rb;
Chuck Lever9128c3e2015-01-21 11:04:00 -05001387
Chuck Lever9128c3e2015-01-21 11:04:00 -05001388 rb = kmalloc(sizeof(*rb) + size, flags);
1389 if (rb == NULL)
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001390 return ERR_PTR(-ENOMEM);
Chuck Lever9128c3e2015-01-21 11:04:00 -05001391
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001392 rb->rg_device = NULL;
Chuck Lever99ef4db2016-09-15 10:56:10 -04001393 rb->rg_direction = direction;
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001394 rb->rg_iov.length = size;
Chuck Lever9128c3e2015-01-21 11:04:00 -05001395
1396 return rb;
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001397}
Chuck Lever9128c3e2015-01-21 11:04:00 -05001398
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001399/**
1400 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
1401 * @ia: controlling rpcrdma_ia
1402 * @rb: regbuf to be mapped
1403 */
1404bool
1405__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
1406{
Chuck Lever91a10c52017-04-11 13:23:02 -04001407 struct ib_device *device = ia->ri_device;
1408
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001409 if (rb->rg_direction == DMA_NONE)
1410 return false;
1411
Chuck Lever91a10c52017-04-11 13:23:02 -04001412 rb->rg_iov.addr = ib_dma_map_single(device,
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001413 (void *)rb->rg_base,
1414 rdmab_length(rb),
1415 rb->rg_direction);
Chuck Lever91a10c52017-04-11 13:23:02 -04001416 if (ib_dma_mapping_error(device, rdmab_addr(rb)))
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001417 return false;
1418
Chuck Lever91a10c52017-04-11 13:23:02 -04001419 rb->rg_device = device;
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001420 rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
1421 return true;
1422}
1423
1424static void
1425rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
1426{
Chuck Levere89e8d8f2018-01-31 12:34:13 -05001427 if (!rb)
1428 return;
1429
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001430 if (!rpcrdma_regbuf_is_mapped(rb))
1431 return;
1432
1433 ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
1434 rdmab_length(rb), rb->rg_direction);
1435 rb->rg_device = NULL;
Chuck Lever9128c3e2015-01-21 11:04:00 -05001436}
1437
1438/**
1439 * rpcrdma_free_regbuf - deregister and free registered buffer
Chuck Lever9128c3e2015-01-21 11:04:00 -05001440 * @rb: regbuf to be deregistered and freed
1441 */
1442void
Chuck Lever13650c22016-09-15 10:56:26 -04001443rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
Chuck Lever9128c3e2015-01-21 11:04:00 -05001444{
Chuck Lever54cbd6b2016-09-15 10:56:18 -04001445 rpcrdma_dma_unmap_regbuf(rb);
Chuck Levere531dca2015-08-03 13:03:20 -04001446 kfree(rb);
Chuck Lever9128c3e2015-01-21 11:04:00 -05001447}
1448
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001449/*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001450 * Prepost any receive buffer, then post send.
1451 *
1452 * Receive buffer is donated to hardware, reclaimed upon recv completion.
1453 */
1454int
1455rpcrdma_ep_post(struct rpcrdma_ia *ia,
1456 struct rpcrdma_ep *ep,
1457 struct rpcrdma_req *req)
1458{
Chuck Leverae729502017-10-20 10:48:12 -04001459 struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
Chuck Lever655fec62016-09-15 10:57:24 -04001460 int rc;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001461
Chuck Lever01bb35c2017-10-20 10:48:36 -04001462 if (!ep->rep_send_count ||
1463 test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
Chuck Leverae729502017-10-20 10:48:12 -04001464 send_wr->send_flags |= IB_SEND_SIGNALED;
1465 ep->rep_send_count = ep->rep_send_batch;
1466 } else {
1467 send_wr->send_flags &= ~IB_SEND_SIGNALED;
1468 --ep->rep_send_count;
1469 }
Chuck Lever7a89f9c2016-06-29 13:53:43 -04001470
Chuck Leverf2877622018-02-28 15:30:59 -05001471 rc = ia->ri_ops->ro_send(ia, req);
Chuck Leverab03eff2017-12-20 16:30:40 -05001472 trace_xprtrdma_post_send(req, rc);
1473 if (rc)
1474 return -ENOTCONN;
1475 return 0;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001476}
1477
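/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): a send path posts a marshaled request as below; -ENOTCONN is
 * the only error rpcrdma_ep_post() returns. example_send_request() is a
 * hypothetical caller.
 */
static int example_send_request(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_req *req)
{
	return rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req);
}
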
Chuck Leverf531a5d2015-10-24 17:27:43 -04001478/**
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001479 * rpcrdma_post_recvs - Maybe post some Receive buffers
1480 * @r_xprt: controlling transport
1481 * @temp: when true, allocate temp rpcrdma_rep objects
Chuck Leverf531a5d2015-10-24 17:27:43 -04001482 *
Chuck Leverf531a5d2015-10-24 17:27:43 -04001483 */
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001484void
1485rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
Chuck Leverf531a5d2015-10-24 17:27:43 -04001486{
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001487 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1488 struct ib_recv_wr *wr, *bad_wr;
1489 int needed, count, rc;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001490
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001491 needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
1492 if (buf->rb_posted_receives > needed)
1493 return;
1494 needed -= buf->rb_posted_receives;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001495
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001496 count = 0;
1497 wr = NULL;
1498 while (needed) {
1499 struct rpcrdma_regbuf *rb;
1500 struct rpcrdma_rep *rep;
1501
1502 spin_lock(&buf->rb_lock);
1503 rep = list_first_entry_or_null(&buf->rb_recv_bufs,
1504 struct rpcrdma_rep, rr_list);
1505 if (likely(rep))
1506 list_del(&rep->rr_list);
1507 spin_unlock(&buf->rb_lock);
1508 if (!rep) {
1509 if (rpcrdma_create_rep(r_xprt, temp))
1510 break;
1511 continue;
1512 }
1513
1514 rb = rep->rr_rdmabuf;
1515 if (!rpcrdma_regbuf_is_mapped(rb)) {
1516 if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) {
1517 rpcrdma_recv_buffer_put(rep);
1518 break;
1519 }
1520 }
1521
1522 trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
1523 rep->rr_recv_wr.next = wr;
1524 wr = &rep->rr_recv_wr;
1525 ++count;
1526 --needed;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001527 }
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001528 if (!count)
1529 return;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001530
Bart Van Assched34ac5c2018-07-18 09:25:32 -07001531 rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
1532 (const struct ib_recv_wr **)&bad_wr);
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001533 if (rc) {
1534 for (wr = bad_wr; wr; wr = wr->next) {
1535 struct rpcrdma_rep *rep;
Chuck Leverf531a5d2015-10-24 17:27:43 -04001536
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001537 rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
1538 rpcrdma_recv_buffer_put(rep);
1539 --count;
1540 }
1541 }
1542 buf->rb_posted_receives += count;
1543 trace_xprtrdma_post_recvs(r_xprt, count, rc);
Chuck Leverf531a5d2015-10-24 17:27:43 -04001544}
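
/*
 * Illustrative note (editorial addition, not part of the original source):
 * the replenish target above is rb_credits plus twice
 * rb_bc_srv_max_requests. For example, with rb_credits = 32 and
 * rb_bc_srv_max_requests = 8, the transport wants 32 + (8 << 1) = 48
 * Receives outstanding; if rb_posted_receives is already 40, only the
 * 8-buffer shortfall is posted. A hypothetical caller replenishing with
 * long-lived (non-temp) reps would simply do:
 */
static void example_replenish(struct rpcrdma_xprt *r_xprt)
{
	rpcrdma_post_recvs(r_xprt, false);
}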