\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040038 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static void rpcrdma_reset_frmrs(struct rpcrdma_ia *);
static void rpcrdma_reset_fmrs(struct rpcrdma_ia *);

/*
 * internal functions
 */

/*
 * Replies are handled in tasklet context, using a single global list.
 * The tasklet function simply walks the list and invokes each reply's
 * handler (or returns the receive buffer to the pool if no handler
 * has been set).
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	data = data;
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static const char * const async_event[] = {
	"CQ error",
	"QP fatal error",
	"QP request error",
	"QP access error",
	"communication established",
	"send queue drained",
	"path migration successful",
	"path mig error",
	"device fatal error",
	"port active",
	"port error",
	"LID change",
	"P_key change",
	"SM change",
	"SRQ error",
	"SRQ limit reached",
	"last WQE reached",
	"client reregister",
	"GID change",
};

#define ASYNC_MSG(status)					\
	((status) < ARRAY_SIZE(async_event) ?			\
		async_event[(status)] : "unknown async error")

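/* Splice a list of replies onto the global tasklet list and kick the
 * tasklet. The irqsave locking allows callers to run in any context,
 * including the CQ upcalls.
 */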
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400137static void
Chuck Leverf1a03b72014-11-08 20:14:37 -0500138rpcrdma_schedule_tasklet(struct list_head *sched_list)
139{
140 unsigned long flags;
141
142 spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
143 list_splice_tail(sched_list, &rpcrdma_tasklets_g);
144 spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
145 tasklet_schedule(&rpcrdma_tasklet_g);
146}
147
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400148static void
149rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
150{
151 struct rpcrdma_ep *ep = context;
152
Chuck Lever7ff11de2014-11-08 20:15:01 -0500153 pr_err("RPC: %s: %s on device %s ep %p\n",
154 __func__, ASYNC_MSG(event->event),
155 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400156 if (ep->rep_connected == 1) {
157 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500158 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400159 wake_up_all(&ep->rep_connect_wait);
160 }
161}
162
163static void
164rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
165{
166 struct rpcrdma_ep *ep = context;
167
Chuck Lever7ff11de2014-11-08 20:15:01 -0500168 pr_err("RPC: %s: %s on device %s ep %p\n",
169 __func__, ASYNC_MSG(event->event),
170 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400171 if (ep->rep_connected == 1) {
172 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500173 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400174 wake_up_all(&ep->rep_connect_wait);
175 }
176}
177
static const char * const wc_status[] = {
	"success",
	"local length error",
	"local QP operation error",
	"local EE context operation error",
	"local protection error",
	"WR flushed",
	"memory management operation error",
	"bad response error",
	"local access error",
	"remote invalid request error",
	"remote access error",
	"remote operation error",
	"transport retry counter exceeded",
	"RNR retry counter exceeded",
	"local RDD violation error",
	"remote invalid RD request",
	"operation aborted",
	"invalid EE context number",
	"invalid EE context state",
	"fatal error",
	"response timeout error",
	"general error",
};

#define COMPLETION_MSG(status)					\
	((status) < ARRAY_SIZE(wc_status) ?			\
		wc_status[(status)] : "unexpected completion error")

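/* A send completion's context travels in wc->wr_id: zero marks an
 * ordinary (unsignaled batch) SEND, while a non-zero value is the
 * address of the rpcrdma_mw whose FRMR work request failed or was
 * flushed; that MW is marked stale.
 */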
static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->wr_id == 0ULL) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("RPC:       %s: SEND: %s\n",
			       __func__, COMPLETION_MSG(wc->status));
	} else {
		struct rpcrdma_mw *r;

		r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
		r->r.frmr.fr_state = FRMR_IS_STALE;
		pr_err("RPC:       %s: frmr %p (stale): %s\n",
		       __func__, r, COMPLETION_MSG(wc->status));
	}
}

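/* Poll the send CQ in batches of RPCRDMA_POLLSIZE wc's, up to a
 * budget of RPCRDMA_WC_BUDGET completions per upcall, so that
 * completion processing in interrupt context stays bounded.
 */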
static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

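/* A successful receive records the reply's length and syncs its
 * buffer for the CPU before queuing the rep on sched_list for the
 * reply tasklet. Failed receives are queued as well, with rr_len set
 * to ~0U so later processing can recognize the failure.
 */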
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC:       %s: rep %p: %s\n",
		       __func__, rep, COMPLETION_MSG(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to preserve
 * the order of receives, which carry the server's credit grants.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

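/* Drain both completion queues by hand, typically around disconnect,
 * so that no completion is left unprocessed. Drained receives still
 * pass through the reply tasklet for proper cleanup.
 */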
static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char * const conn[] = {
	"address resolved",
	"address error",
	"route resolved",
	"route error",
	"connect request",
	"connect response",
	"connect error",
	"unreachable",
	"rejected",
	"established",
	"disconnected",
	"device removal",
	"multicast join",
	"multicast error",
	"address change",
	"timewait exit",
};

#define CONNECTION_MSG(status)					\
	((status) < ARRAY_SIZE(conn) ?				\
		conn[(status)] : "unrecognized connection error")
#endif

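/* Connection manager event handler. ep->rep_connected carries the
 * resulting state: 0 while a connect is in progress, 1 once
 * established, and a negative errno after an error or disconnect.
 */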
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			CONNECTION_MSG(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg %d slots %d ird %d%s\n",
			sap, rpc_get_port(sap),
			ia->ri_id->device->name,
			ia->ri_memreg_strategy,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

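/* Create a cm_id and resolve the server's address and route. Each
 * rdma_resolve_* call completes asynchronously through the CM event
 * handler, which sets ri_async_rc and signals ri_done; the
 * -ETIMEDOUT preset survives if the wait itself times out.
 */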
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_id->device, devattr);
	if (rc) {
		dprintk("RPC:       %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

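	/* Choose a memory registration strategy the device supports,
	 * falling back from FRMR to FMR to ALLPHYSICAL when a required
	 * capability is missing.
	 */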
	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if ((devattr->device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
			dprintk("RPC:       %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		} else {
			/* Mind the ia limit on FRMR page list depth */
			ia->ri_max_frmr_depth = min_t(unsigned int,
				RPCRDMA_MAX_DATA_SEGS,
				devattr->max_fast_reg_page_list_len);
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC:       %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_ALLPHYSICAL;
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		break;
	case RPCRDMA_ALLPHYSICAL:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
	case RPCRDMA_MTHCAFMR:
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
	register_setup:
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out3;
		}
		break;
	default:
		printk(KERN_ERR "RPC:       Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC:       %s: memory registration strategy is %d\n",
		__func__, memreg);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_bind_mem != NULL) {
		rc = ib_dereg_mr(ia->ri_bind_mem);
		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}
	if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
		rc = ib_dealloc_pd(ia->ri_pd);
		dprintk("RPC:       %s: ib_dealloc_pd returned %i\n",
			__func__, rc);
	}
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_cq *sendcq, *recvcq;
	int rc, err;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr->max_qp_wr)
		cdata->max_requests = devattr->max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	/* send_cq and recv_cq initialized below */
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR: {
		int depth = 7;

		/* Add room for frmr register and invalidate WRs.
		 * 1. FRMR reg WR for head
		 * 2. FRMR invalidate WR for head
		 * 3. N FRMR reg WRs for pagelist
		 * 4. N FRMR invalidate WRs for pagelist
		 * 5. FRMR reg WR for tail
		 * 6. FRMR invalidate WR for tail
		 * 7. The RDMA_SEND WR
		 */

		/* Calculate N if the device max FRMR depth is smaller than
		 * RPCRDMA_MAX_DATA_SEGS.
		 */
		if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
			int delta = RPCRDMA_MAX_DATA_SEGS -
				    ia->ri_max_frmr_depth;

			do {
				depth += 2; /* FRMR reg + invalidate */
				delta -= ia->ri_max_frmr_depth;
			} while (delta > 0);

		}
		ep->rep_attr.cap.max_send_wr *= depth;
		if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
			cdata->max_requests = devattr->max_qp_wr / depth;
			if (!cdata->max_requests)
				return -EINVAL;
			ep->rep_attr.cap.max_send_wr = cdata->max_requests *
						       depth;
		}
		break;
	}
	default:
		break;
	}
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	if (cdata->padding) {
		ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
						      GFP_KERNEL);
		if (IS_ERR(ep->rep_padbuf))
			return PTR_ERR(ep->rep_padbuf);
	} else
		ep->rep_padbuf = NULL;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* Set the trigger for requesting a send completion: leave sends
	 * unsignaled until rep_cqinit of them have been posted, so that
	 * the send CQ is serviced before it can wrap and lose entries.
	 */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep,
			      ep->rep_attr.cap.max_send_wr + 1, 0);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep,
			      ep->rep_attr.cap.max_recv_wr + 1, 0);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						devattr->max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	rpcrdma_free_regbuf(ia, ep->rep_padbuf);

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		switch (ia->ri_memreg_strategy) {
		case RPCRDMA_FRMR:
			rpcrdma_reset_frmrs(ia);
			break;
		case RPCRDMA_MTHCAFMR:
			rpcrdma_reset_fmrs(ia);
			break;
		case RPCRDMA_ALLPHYSICAL:
			break;
		default:
			rc = -EIO;
			goto out;
		}

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
	} else {
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition for which a
	 * best-effort recovery has already been made.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC:       %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
						ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_buffer = &r_xprt->rx_buf;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

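/* The pool initializers below allocate (rb_max_requests + 1) *
 * RPCRDMA_MAX_SEGS memory windows apiece: rb_mws is the free list
 * drawn on when marshaling requests, while rb_all tracks every MW so
 * all of them can be found again after a transport disconnect.
 */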
static int
rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_DATA_SEGS,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};
	struct rpcrdma_mw *r;
	int i, rc;

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
	dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (r == NULL)
			return -ENOMEM;

		r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr);
		if (IS_ERR(r->r.fmr)) {
			rc = PTR_ERR(r->r.fmr);
			dprintk("RPC:       %s: ib_alloc_fmr failed %i\n",
				__func__, rc);
			goto out_free;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;

out_free:
	kfree(r);
	return rc;
}

static int
rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *r;
	int i, rc;

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
	dprintk("RPC:       %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (r == NULL)
			return -ENOMEM;
		f = &r->r.frmr;

		f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
						ia->ri_max_frmr_depth);
		if (IS_ERR(f->fr_mr)) {
			rc = PTR_ERR(f->fr_mr);
			dprintk("RPC:       %s: ib_alloc_fast_reg_mr "
				"failed %i\n", __func__, rc);
			goto out_free;
		}

		f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device,
							ia->ri_max_frmr_depth);
		if (IS_ERR(f->fr_pgl)) {
			rc = PTR_ERR(f->fr_pgl);
			dprintk("RPC:       %s: ib_alloc_fast_reg_page_list "
				"failed %i\n", __func__, rc);

			ib_dereg_mr(f->fr_mr);
			goto out_free;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}

	return 0;

out_free:
	kfree(r);
	return rc;
}

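/* Build the buffer pool: one kzalloc'd block (rb_pool) holds the
 * rb_send_bufs and rb_recv_bufs pointer arrays back to back, and
 * each slot is then populated with a freshly created req or rep.
 */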
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001208int
Chuck Leverac920d02015-01-21 11:03:44 -05001209rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001210{
Chuck Leverac920d02015-01-21 11:03:44 -05001211 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1212 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
1213 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001214 char *p;
Chuck Lever13924022015-01-21 11:03:52 -05001215 size_t len;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001216 int i, rc;
1217
1218 buf->rb_max_requests = cdata->max_requests;
1219 spin_lock_init(&buf->rb_lock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001220
1221 /* Need to allocate:
1222 * 1. arrays for send and recv pointers
1223 * 2. arrays of struct rpcrdma_req to fill in pointers
1224 * 3. array of struct rpcrdma_rep for replies
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001225 * Send/recv buffers in req/rep need to be registered
1226 */
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001227 len = buf->rb_max_requests *
1228 (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001229
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001230 p = kzalloc(len, GFP_KERNEL);
1231 if (p == NULL) {
1232 dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
1233 __func__, len);
1234 rc = -ENOMEM;
1235 goto out;
1236 }
1237 buf->rb_pool = p; /* for freeing it later */
1238
1239 buf->rb_send_bufs = (struct rpcrdma_req **) p;
1240 p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
1241 buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
1242 p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
1243
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001244 INIT_LIST_HEAD(&buf->rb_mws);
Chuck Lever3111d722014-07-29 17:24:28 -04001245 INIT_LIST_HEAD(&buf->rb_all);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001246 switch (ia->ri_memreg_strategy) {
Tom Talpey3197d3092008-10-09 15:00:20 -04001247 case RPCRDMA_FRMR:
Chuck Lever2e845222014-07-29 17:25:38 -04001248 rc = rpcrdma_init_frmrs(ia, buf);
1249 if (rc)
1250 goto out;
Tom Talpey3197d3092008-10-09 15:00:20 -04001251 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001252 case RPCRDMA_MTHCAFMR:
Chuck Lever2e845222014-07-29 17:25:38 -04001253 rc = rpcrdma_init_fmrs(ia, buf);
1254 if (rc)
1255 goto out;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001256 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001257 default:
1258 break;
1259 }
1260
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001261 for (i = 0; i < buf->rb_max_requests; i++) {
1262 struct rpcrdma_req *req;
1263 struct rpcrdma_rep *rep;
1264
Chuck Lever13924022015-01-21 11:03:52 -05001265 req = rpcrdma_create_req(r_xprt);
1266 if (IS_ERR(req)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001267 dprintk("RPC: %s: request buffer %d alloc"
1268 " failed\n", __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001269 rc = PTR_ERR(req);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001270 goto out;
1271 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001272 buf->rb_send_bufs[i] = req;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001273
Chuck Lever13924022015-01-21 11:03:52 -05001274 rep = rpcrdma_create_rep(r_xprt);
1275 if (IS_ERR(rep)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001276 dprintk("RPC: %s: reply buffer %d alloc failed\n",
1277 __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001278 rc = PTR_ERR(rep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001279 goto out;
1280 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001281 buf->rb_recv_bufs[i] = rep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001282 }
Chuck Lever13924022015-01-21 11:03:52 -05001283
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001284 return 0;
1285out:
1286 rpcrdma_buffer_destroy(buf);
1287 return rc;
1288}
1289
static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	if (!rep)
		return;

	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	if (!req)
		return;

	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int rc;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		list_del(&r->mw_list);

		rc = ib_dealloc_fmr(r->r.fmr);
		if (rc)
			dprintk("RPC:       %s: ib_dealloc_fmr failed %i\n",
				__func__, rc);

		kfree(r);
	}
}

static void
rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int rc;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		list_del(&r->mw_list);

		rc = ib_dereg_mr(r->r.frmr.fr_mr);
		if (rc)
			dprintk("RPC:       %s: ib_dereg_mr failed %i\n",
				__func__, rc);
		ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);

		kfree(r);
	}
}

\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001352void
1353rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
1354{
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001355 struct rpcrdma_ia *ia = rdmab_to_ia(buf);
Chuck Lever2e845222014-07-29 17:25:38 -04001356 int i;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001357
1358 /* clean up in reverse order from create
1359 * 1. recv mr memory (mr free, then kfree)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001360 * 2. send mr memory (mr free, then kfree)
Chuck Lever2e845222014-07-29 17:25:38 -04001361 * 3. MWs
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001362 */
1363 dprintk("RPC: %s: entering\n", __func__);
1364
1365 for (i = 0; i < buf->rb_max_requests; i++) {
Chuck Lever13924022015-01-21 11:03:52 -05001366 if (buf->rb_recv_bufs)
1367 rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
1368 if (buf->rb_send_bufs)
1369 rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001370 }
1371
Chuck Lever2e845222014-07-29 17:25:38 -04001372 switch (ia->ri_memreg_strategy) {
1373 case RPCRDMA_FRMR:
1374 rpcrdma_destroy_frmrs(buf);
1375 break;
1376 case RPCRDMA_MTHCAFMR:
1377 rpcrdma_destroy_fmrs(buf);
1378 break;
1379 default:
1380 break;
Allen Andrews4034ba02014-05-28 10:32:09 -04001381 }
1382
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001383 kfree(buf->rb_pool);
1384}
1385
/* After a disconnect, unmap all FMRs.
 *
 * This is invoked only in the transport connect worker in order
 * to serialize with rpcrdma_register_fmr_external().
 */
static void
rpcrdma_reset_fmrs(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt =
				container_of(ia, struct rpcrdma_xprt, rx_ia);
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct list_head *pos;
	struct rpcrdma_mw *r;
	LIST_HEAD(l);
	int rc;

	list_for_each(pos, &buf->rb_all) {
		r = list_entry(pos, struct rpcrdma_mw, mw_all);

		INIT_LIST_HEAD(&l);
		list_add(&r->r.fmr->list, &l);
		rc = ib_unmap_fmr(&l);
		if (rc)
			dprintk("RPC:       %s: ib_unmap_fmr failed %i\n",
				__func__, rc);
	}
}

Chuck Lever9f9d8022014-07-29 17:24:45 -04001414/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
1415 * an unusable state. Find FRMRs in this state and dereg / reg
1416 * each. FRMRs that are VALID and attached to an rpcrdma_req are
1417 * also torn down.
1418 *
1419 * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
1420 *
1421 * This is invoked only in the transport connect worker in order
1422 * to serialize with rpcrdma_register_frmr_external().
1423 */
1424static void
1425rpcrdma_reset_frmrs(struct rpcrdma_ia *ia)
1426{
1427 struct rpcrdma_xprt *r_xprt =
1428 container_of(ia, struct rpcrdma_xprt, rx_ia);
1429 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1430 struct list_head *pos;
1431 struct rpcrdma_mw *r;
1432 int rc;
1433
1434 list_for_each(pos, &buf->rb_all) {
1435 r = list_entry(pos, struct rpcrdma_mw, mw_all);
1436
1437 if (r->r.frmr.fr_state == FRMR_IS_INVALID)
1438 continue;
1439
1440 rc = ib_dereg_mr(r->r.frmr.fr_mr);
1441 if (rc)
1442 dprintk("RPC: %s: ib_dereg_mr failed %i\n",
1443 __func__, rc);
1444 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
1445
1446 r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
1447 ia->ri_max_frmr_depth);
1448 if (IS_ERR(r->r.frmr.fr_mr)) {
1449 rc = PTR_ERR(r->r.frmr.fr_mr);
1450 dprintk("RPC: %s: ib_alloc_fast_reg_mr"
1451 " failed %i\n", __func__, rc);
1452 continue;
1453 }
1454 r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
1455 ia->ri_id->device,
1456 ia->ri_max_frmr_depth);
1457 if (IS_ERR(r->r.frmr.fr_pgl)) {
1458 rc = PTR_ERR(r->r.frmr.fr_pgl);
1459 dprintk("RPC: %s: "
1460 "ib_alloc_fast_reg_page_list "
1461 "failed %i\n", __func__, rc);
1462
1463 ib_dereg_mr(r->r.frmr.fr_mr);
1464 continue;
1465 }
1466 r->r.frmr.fr_state = FRMR_IS_INVALID;
1467 }
1468}

/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
 * some req segments uninitialized.
 */
static void
rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
{
	if (*mw) {
		list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
		*mw = NULL;
	}
}

/* Cycle mw's back in reverse order, and "spin" them.
 * This delays and scrambles reuse as much as possible.
 */
static void
rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_mr_seg *seg1 = seg;
	int i;

	for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
		rpcrdma_buffer_put_mr(&seg->rl_mw, buf);
	rpcrdma_buffer_put_mr(&seg1->rl_mw, buf);
}

static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply->rr_func = NULL;
		req->rl_reply = NULL;
	}
}

/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external().
 * Redo only the ib_post_send().
 */
static void
rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt =
				container_of(ia, struct rpcrdma_xprt, rx_ia);
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	dprintk("RPC:       %s: FRMR %p is stale\n", __func__, r);

	/* When this FRMR is re-inserted into rb_mws, it is no longer stale */
	r->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)r;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	dprintk("RPC:       %s: frmr %p invalidating rkey %08x\n",
		__func__, r, r->r.frmr.fr_mr->rkey);

	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc) {
		/* Force rpcrdma_buffer_get() to retry */
		r->r.frmr.fr_state = FRMR_IS_STALE;
		dprintk("RPC:       %s: ib_post_send failed, %i\n",
			__func__, rc);
	}
}
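
/* FRMR state summary, as used by the helpers above and below:
 *
 *	FRMR_IS_INVALID: not registered; safe to fast-register again
 *	FRMR_IS_VALID:   a fast-registration has been posted
 *	FRMR_IS_STALE:   flushed by a disconnect; needs recovery
 *
 * rpcrdma_retry_local_inv() moves a stale FRMR back to INVALID by
 * reposting the LOCAL_INV; if that post fails, the FRMR is marked
 * STALE again so rpcrdma_buffer_get_frmrs() keeps skipping it.
 */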

static void
rpcrdma_retry_flushed_linv(struct list_head *stale,
			   struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct list_head *pos;
	struct rpcrdma_mw *r;
	unsigned long flags;

	list_for_each(pos, stale) {
		r = list_entry(pos, struct rpcrdma_mw, mw_list);
		rpcrdma_retry_local_inv(r, ia);
	}

	spin_lock_irqsave(&buf->rb_lock, flags);
	list_splice_tail(stale, &buf->rb_mws);
	spin_unlock_irqrestore(&buf->rb_lock, flags);
}

static struct rpcrdma_req *
rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
			 struct list_head *stale)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		if (r->r.frmr.fr_state == FRMR_IS_STALE) {
			list_add(&r->mw_list, stale);
			continue;
		}
		req->rl_segments[i].rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		req->rl_segments[i].rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}
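
/* Note: rpcrdma_buffer_get_fmrs() mirrors rpcrdma_buffer_get_frmrs()
 * minus the stale check: FMRs do not go stale on disconnect (they are
 * simply unmapped again by rpcrdma_reset_fmrs()), so there is no FMR
 * analog of FRMR_IS_STALE.
 */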

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	struct list_head stale;
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC:       %s: out of request buffers\n", __func__);
		return NULL;
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC:       %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

	INIT_LIST_HEAD(&stale);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		req = rpcrdma_buffer_get_frmrs(req, buffers, &stale);
		break;
	case RPCRDMA_MTHCAFMR:
		req = rpcrdma_buffer_get_fmrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	if (!list_empty(&stale))
		rpcrdma_retry_flushed_linv(&stale, buffers);
	return req;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
	case RPCRDMA_MTHCAFMR:
		rpcrdma_buffer_put_mrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
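
/* Illustrative call pattern for the two functions above (a sketch,
 * not code from this file; error handling is caller-specific):
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 *	if (req == NULL)
 *		... back off and retry later ...
 *	... marshal the RPC and post it with rpcrdma_ep_post() ...
 *	rpcrdma_buffer_put(req);	(once the reply is consumed)
 */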

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = rep->rr_buffer;
	unsigned long flags;

	rep->rr_func = NULL;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

static int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
			  struct ib_mr **mrp, struct ib_sge *iov)
{
	struct ib_phys_buf ipb;
	struct ib_mr *mr;
	int rc;

	/*
	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
	 */
	iov->addr = ib_dma_map_single(ia->ri_id->device,
			va, len, DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
		return -ENOMEM;

	iov->length = len;

	if (ia->ri_have_dma_lkey) {
		*mrp = NULL;
		iov->lkey = ia->ri_dma_lkey;
		return 0;
	} else if (ia->ri_bind_mem != NULL) {
		*mrp = NULL;
		iov->lkey = ia->ri_bind_mem->lkey;
		return 0;
	}

	ipb.addr = iov->addr;
	ipb.size = iov->length;
	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
			IB_ACCESS_LOCAL_WRITE, &iov->addr);

	dprintk("RPC:       %s: phys convert: 0x%llx "
			"registered 0x%llx length %d\n",
			__func__, (unsigned long long)ipb.addr,
			(unsigned long long)iov->addr, len);

	if (IS_ERR(mr)) {
		*mrp = NULL;
		rc = PTR_ERR(mr);
		dprintk("RPC:       %s: failed with %i\n", __func__, rc);
	} else {
		*mrp = mr;
		iov->lkey = mr->lkey;
		rc = 0;
	}

	return rc;
}

static int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
			    struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_id->device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);

	if (mr == NULL)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr failed %i\n",
			__func__, rc);
	return rc;
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	int rc;

	rc = -ENOMEM;
	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	rb->rg_size = size;
	rb->rg_owner = NULL;
	rc = rpcrdma_register_internal(ia, rb->rg_base, size,
				       &rb->rg_mr, &rb->rg_iov);
	if (rc)
		goto out_free;

	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(rc);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb) {
		rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
		kfree(rb);
	}
}
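
/* Typical pairing for the two regbuf calls above (a sketch; the real
 * callers live elsewhere in xprtrdma):
 *
 *	struct rpcrdma_regbuf *rb;
 *
 *	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	... post SEND/RECV work requests using rb->rg_iov as the sge ...
 *	rpcrdma_free_regbuf(ia, rb);
 */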

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static void
rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
{
	seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	seg->mr_dmalen = seg->mr_len;
	if (seg->mr_page)
		seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
				seg->mr_page, offset_in_page(seg->mr_offset),
				seg->mr_dmalen, seg->mr_dir);
	else
		seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
				seg->mr_offset,
				seg->mr_dmalen, seg->mr_dir);
	if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
		dprintk("RPC:       %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
			__func__,
			(unsigned long long)seg->mr_dma,
			seg->mr_offset, seg->mr_dmalen);
	}
}

static void
rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
{
	if (seg->mr_page)
		ib_dma_unmap_page(ia->ri_id->device,
				  seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
	else
		ib_dma_unmap_single(ia->ri_id->device,
				    seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
}
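
/* Note: every successful rpcrdma_map_one() must eventually be paired
 * with rpcrdma_unmap_one() on the same ia; seg->mr_dir is recorded at
 * map time so the unmap side reuses the matching DMA direction.
 */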

static int
rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
			       int *nsegs, int writing, struct rpcrdma_ia *ia,
			       struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_mr *mr = frmr->fr_mr;
	struct ib_send_wr fastreg_wr, *bad_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > ia->ri_max_frmr_depth)
		*nsegs = ia->ri_max_frmr_depth;
	for (page_no = i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	dprintk("RPC:       %s: Using frmr %p to map %d segments\n",
		__func__, mw, i);

	frmr->fr_state = FRMR_IS_VALID;

	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = (unsigned long)(void *)mw;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma;
	fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
	fastreg_wr.wr.fast_reg.page_list_len = page_no;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
	if (fastreg_wr.wr.fast_reg.length < len) {
		rc = -EIO;
		goto out_err;
	}

	/* Bump the key: the low-order byte of the rkey is a consumer-owned
	 * "key" portion. Incrementing it before each FAST_REG_MR makes the
	 * new registration distinguishable from earlier uses of this MR.
	 */
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	fastreg_wr.wr.fast_reg.access_flags = (writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ);
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
	if (rc) {
		dprintk("RPC:       %s: failed ib_post_send for register,"
			" status %i\n", __func__, rc);
		ib_update_fast_reg_key(mr, --key);
		goto out_err;
	} else {
		seg1->mr_rkey = mr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return 0;
out_err:
	frmr->fr_state = FRMR_IS_INVALID;
	while (i--)
		rpcrdma_unmap_one(ia, --seg);
	return rc;
}

static int
rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
				 struct rpcrdma_ia *ia,
				 struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc) {
		/* Force rpcrdma_buffer_get() to retry */
		seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
		dprintk("RPC:       %s: failed ib_post_send for invalidate,"
			" status %i\n", __func__, rc);
	}
	return rc;
}

static int
rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
			      int *nsegs, int writing, struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
	int len, pageoff, i, rc;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
		*nsegs = RPCRDMA_MAX_DATA_SEGS;
	for (i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma);
	if (rc) {
		dprintk("RPC:       %s: failed ib_map_phys_fmr "
			"%u@0x%llx+%i (%d)... status %i\n", __func__,
			len, (unsigned long long)seg1->mr_dma,
			pageoff, i, rc);
		while (i--)
			rpcrdma_unmap_one(ia, --seg);
	} else {
		seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return rc;
}

static int
rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
				struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	LIST_HEAD(l);
	int rc;

	list_add(&seg1->rl_mw->r.fmr->list, &l);
	rc = ib_unmap_fmr(&l);
	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	read_unlock(&ia->ri_qplock);
	if (rc)
		dprintk("RPC:       %s: failed ib_unmap_fmr,"
			" status %i\n", __func__, rc);
	return rc;
}

int
rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
			  int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc = 0;

	switch (ia->ri_memreg_strategy) {

	case RPCRDMA_ALLPHYSICAL:
		rpcrdma_map_one(ia, seg, writing);
		seg->mr_rkey = ia->ri_bind_mem->rkey;
		seg->mr_base = seg->mr_dma;
		seg->mr_nsegs = 1;
		nsegs = 1;
		break;

	/* Registration using frmr registration */
	case RPCRDMA_FRMR:
		rc = rpcrdma_register_frmr_external(seg, &nsegs, writing,
						    ia, r_xprt);
		break;

	/* Registration using fmr memory registration */
	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
		break;

	default:
		return -EIO;
	}
	if (rc)
		return rc;

	return nsegs;
}

int
rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
			    struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int nsegs = seg->mr_nsegs, rc;

	switch (ia->ri_memreg_strategy) {

	case RPCRDMA_ALLPHYSICAL:
		read_lock(&ia->ri_qplock);
		rpcrdma_unmap_one(ia, seg);
		read_unlock(&ia->ri_qplock);
		break;

	case RPCRDMA_FRMR:
		rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
		break;

	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_deregister_fmr_external(seg, ia);
		break;

	default:
		break;
	}
	return nsegs;
}
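
/* Illustrative pairing for the external (de)registration entry points
 * (a sketch; the real callers marshal RDMA chunk lists in rpc_rdma.c):
 *
 *	n = rpcrdma_register_external(seg, nsegs, writing, r_xprt);
 *	if (n < 0)
 *		... registration failed; n is a negative errno ...
 *	... advertise seg->mr_rkey, seg->mr_base, seg->mr_len ...
 *	rpcrdma_deregister_external(seg, r_xprt);
 */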

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	int rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = 0ULL;	/* no send cookie */
	send_wr.sg_list = req->rl_send_iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;
	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
		ib_dma_sync_single_for_device(ia->ri_id->device,
			req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
			DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
		DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
		DMA_TO_DEVICE);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else {	/* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}
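
/* Completion signaling note: sends are posted unsignaled until the
 * CQCOUNT budget is spent, then one signaled send is posted and the
 * budget is reset. The provider can thus reap completed send WRs
 * periodically rather than taking an interrupt per send.
 */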

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64)(unsigned long)rep;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_id->device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}

/* Physical mapping means one Read/Write list entry per page.
 * All list entries must fit within an inline buffer.
 *
 * NB: The server must return a Write list for NFS READ,
 * which has the same constraint. Factor in the inline
 * rsize as well.
 */
static size_t
rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	unsigned int inline_size, pages;

	inline_size = min_t(unsigned int,
			    cdata->inline_wsize, cdata->inline_rsize);
	inline_size -= RPCRDMA_HDRLEN_MIN;
	pages = inline_size / sizeof(struct rpcrdma_segment);
	return pages << PAGE_SHIFT;
}
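
/* Worked example for rpcrdma_physical_max_payload(), assuming 4KB
 * pages, 1024-byte inline buffers, an RPCRDMA_HDRLEN_MIN of 28, and
 * a 16-byte struct rpcrdma_segment (illustrative values only):
 *
 *	inline_size = 1024 - 28 = 996
 *	pages       = 996 / 16  = 62
 *	max payload = 62 << 12  = 253952 bytes (~248KB)
 */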

static size_t
rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
{
	return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
}

size_t
rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
{
	size_t result;

	switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_ALLPHYSICAL:
		result = rpcrdma_physical_max_payload(r_xprt);
		break;
	default:
		result = rpcrdma_mr_max_payload(r_xprt);
	}
	return result;
}