// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/smp.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
static unsigned int dummy;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &dummy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif
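
/* Example usage (illustrative values, not part of the original source):
 * with CONFIG_SUNRPC_DEBUG enabled, the tables above are registered in
 * xprt_rdma_init() and the tunables appear under /proc/sys/sunrpc/, e.g.:
 *
 *	# cat /proc/sys/sunrpc/rdma_slot_table_entries
 *	# sysctl -w sunrpc.rdma_max_inline_write=4096
 *
 * proc_dointvec_minmax() rejects writes outside the bounds supplied via
 * .extra1 and .extra2.
 */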

static const struct rpc_xprt_ops xprt_rdma_procs;

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

/**
 * xprt_rdma_connect_worker - establish connection in the background
 * @work: worker thread context
 *
 * Requester holds the xprt's send lock to prevent activity on this
 * transport while a fresh connection is being established. RPC tasks
 * sleep on the xprt's pending queue waiting for connect to complete.
 */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

	rc = rpcrdma_xprt_connect(r_xprt);
	xprt_clear_connecting(xprt);
	if (!rc) {
		xprt->connect_cookie++;
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies -
					   xprt->stat.connect_start;
		xprt_set_connected(xprt);
		rc = -EAGAIN;
	}
	xprt_wake_pending_tasks(xprt, rc);
}

/**
 * xprt_rdma_inject_disconnect - inject a connection fault
 * @xprt: transport context
 *
 * If @xprt is connected, disconnect it to simulate spurious connection
 * loss.
 */
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_inject_dsc(r_xprt);
	rdma_disconnect(r_xprt->rx_ep->re_id);
}

/**
 * xprt_rdma_destroy - Full tear down of transport
 * @xprt: doomed transport context
 *
 * Caller guarantees there will be no more calls to us with
 * this @xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	rpcrdma_xprt_disconnect(r_xprt);
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);

	xprt_rdma_free_addresses(xprt);
	xprt_free(xprt);

	module_put(THIS_MODULE);
}

/* 60 second timeout, no retries */
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct sockaddr *sap;
	int rc;

	if (args->addrlen > sizeof(xprt->addr))
		return ERR_PTR(-EBADF);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-EIO);

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
			  xprt_rdma_slot_table_entries);
	if (!xprt) {
		module_put(THIS_MODULE);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->connect_timeout = xprt->timeout->to_initval;
	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */
	sap = args->dstaddr;

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, sap, xprt->addrlen);

	if (rpc_get_port(sap))
		xprt_set_bound(xprt);
	xprt_rdma_format_addresses(xprt, sap);

	new_xprt = rpcx_to_rdmax(xprt);
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc) {
		xprt_rdma_free_addresses(xprt);
		xprt_free(xprt);
		module_put(THIS_MODULE);
		return ERR_PTR(rc);
	}

	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;

	return xprt;
}

/**
 * xprt_rdma_close - close a transport connection
 * @xprt: transport context
 *
 * Called during autoclose or device removal.
 *
 * Caller holds @xprt's send lock to prevent activity on this
 * transport while the connection is torn down.
 */
void xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	rpcrdma_xprt_disconnect(r_xprt);

	xprt->reestablish_timeout = 0;
	++xprt->connect_cookie;
	xprt_disconnect_done(xprt);
}

/**
 * xprt_rdma_set_port - update server port with rpcbind result
 * @xprt: controlling RPC transport
 * @port: new port value
 *
 * Transport connect status is unchanged.
 */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
	char buf[8];

	rpc_set_port(sap, port);

	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
	snprintf(buf, sizeof(buf), "%u", port);
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	snprintf(buf, sizeof(buf), "%4hx", port);
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	trace_xprtrdma_op_setport(container_of(xprt, struct rpcrdma_xprt,
					       rx_xprt));
}

/**
 * xprt_rdma_timer - invoked when an RPC times out
 * @xprt: controlling RPC transport
 * @task: RPC task that timed out
 *
 * Invoked when the transport is still connected, but an RPC
 * retransmit timeout occurs.
 *
 * Since RDMA connections don't have a keep-alive, forcibly
 * disconnect and retry to connect. This drives full
 * detection of the network path, and retransmissions of
 * all pending RPCs.
 */
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_force_disconnect(xprt);
}
/**
 * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection
 * @xprt: controlling transport instance
 * @connect_timeout: reconnect timeout after client disconnects
 * @reconnect_timeout: reconnect timeout after server disconnects
 *
 */
static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
					  unsigned long connect_timeout,
					  unsigned long reconnect_timeout)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_set_cto(r_xprt, connect_timeout, reconnect_timeout);

	spin_lock(&xprt->transport_lock);

	if (connect_timeout < xprt->connect_timeout) {
		struct rpc_timeout to;
		unsigned long initval;

		to = *xprt->timeout;
		initval = connect_timeout;
		if (initval < RPCRDMA_INIT_REEST_TO << 1)
			initval = RPCRDMA_INIT_REEST_TO << 1;
		to.to_initval = initval;
		to.to_maxval = initval;
		r_xprt->rx_timeout = to;
		xprt->timeout = &r_xprt->rx_timeout;
		xprt->connect_timeout = connect_timeout;
	}

	if (reconnect_timeout < xprt->max_reconnect_timeout)
		xprt->max_reconnect_timeout = reconnect_timeout;

	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_rdma_connect - schedule an attempt to reconnect
 * @xprt: transport state
 * @task: RPC scheduler context (unused)
 *
 */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned long delay;

	delay = 0;
	if (ep && ep->re_connect_status != 0) {
		delay = xprt_reconnect_delay(xprt);
		xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
	}
	trace_xprtrdma_op_connect(r_xprt, delay);
	queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
			   delay);
}

/**
 * xprt_rdma_alloc_slot - allocate an rpc_rqst
 * @xprt: controlling RPC transport
 * @task: RPC task requesting a fresh rpc_rqst
 *
 * tk_status values:
 *	%0 if task->tk_rqstp points to a fresh rpc_rqst
 *	%-EAGAIN if no rpc_rqst is available; queued on backlog
 */
static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (!req)
		goto out_sleep;
	task->tk_rqstp = &req->rl_slot;
	task->tk_status = 0;
	return;

out_sleep:
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
	task->tk_status = -EAGAIN;
}

/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(xprt, struct rpcrdma_xprt, rx_xprt);

	memset(rqst, 0, sizeof(*rqst));
	rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
	if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
				 struct rpcrdma_regbuf *rb, size_t size,
				 gfp_t flags)
{
	if (unlikely(rdmab_length(rb) < size)) {
		if (!rpcrdma_regbuf_realloc(rb, size, flags))
			return false;
		r_xprt->rx_stats.hardway_register_count += size;
	}
	return true;
}

/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *	0:	Success; rq_buffer points to RPC buffer to use
 *	ENOMEM:	Out of memory, call again later
 *	EIO:	A permanent error occurred, do not retry
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	gfp_t flags;

	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
				  flags))
		goto out_fail;
	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
				  flags))
		goto out_fail;

	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
	rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
	trace_xprtrdma_op_allocate(task, req);
	return 0;

out_fail:
	trace_xprtrdma_op_allocate(task, NULL);
	return -ENOMEM;
}

/**
 * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
 * @task: RPC task
 *
 * Caller guarantees rqst->rq_buffer is non-NULL.
 */
static void
xprt_rdma_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	trace_xprtrdma_op_free(task, req);

	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, req);

	/* XXX: If the RPC is completing because of a signal and
	 * not because a reply was received, we ought to ensure
	 * that the Send completion has fired, so that memory
	 * involved with the Send is not still visible to the NIC.
	 */
}

/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @rqst: RPC message in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EAGAIN if the caller should call again
 *	%-ENOBUFS if the caller should call again after a delay
 *	%-EMSGSIZE if encoding ran out of buffer space. The request
 *		was not sent. Do not try to send this message again.
 *	%-EIO if an I/O error occurred. The request was not sent.
 *		Do not try to send this message again.
 */
static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (unlikely(!rqst->rq_buffer))
		return xprt_rdma_bc_send_reply(rqst);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_marshal_req(r_xprt, rqst);
	if (rc < 0)
		goto failed_marshal;

	/* Must suppress retransmit to maintain credits */
	if (rqst->rq_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	rqst->rq_xtime = ktime_get();

	if (rpcrdma_post_sends(r_xprt, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;

	/* An RPC with no reply will throw off credit accounting,
	 * so drop the connection to reset the credit grant.
	 */
	if (!rpc_reply_expected(rqst->rq_task))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_puts(seq, "\txprt:\trdma ");
	seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
		   0,	/* need a local port? */
		   xprt->stat.bind_count,
		   xprt->stat.connect_count,
		   xprt->stat.connect_time / HZ,
		   idle_time,
		   xprt->stat.sends,
		   xprt->stat.recvs,
		   xprt->stat.bad_xids,
		   xprt->stat.req_u,
		   xprt->stat.bklog_u);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
		   r_xprt->rx_stats.read_chunk_count,
		   r_xprt->rx_stats.write_chunk_count,
		   r_xprt->rx_stats.reply_chunk_count,
		   r_xprt->rx_stats.total_rdma_request,
		   r_xprt->rx_stats.total_rdma_reply,
		   r_xprt->rx_stats.pullup_copy_count,
		   r_xprt->rx_stats.fixup_copy_count,
		   r_xprt->rx_stats.hardway_register_count,
		   r_xprt->rx_stats.failed_marshal_count,
		   r_xprt->rx_stats.bad_reply_count,
		   r_xprt->rx_stats.nomsg_call_count);
	seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
		   r_xprt->rx_stats.mrs_recycled,
		   r_xprt->rx_stats.mrs_orphaned,
		   r_xprt->rx_stats.mrs_allocated,
		   r_xprt->rx_stats.local_inv_needed,
		   r_xprt->rx_stats.empty_sendctx_q,
		   r_xprt->rx_stats.reply_waits_for_send);
}

static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static const struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_rdma_alloc_slot,
	.free_slot		= xprt_rdma_free_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.wait_for_reply_request	= xprt_wait_for_reply_request_def, /* ditto */
	.timer			= xprt_rdma_timer,
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.set_connect_timeout	= xprt_rdma_set_connect_timeout,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	.bc_setup		= xprt_rdma_bc_setup,
	.bc_maxpayload		= xprt_rdma_bc_maxpayload,
	.bc_num_slots		= xprt_rdma_bc_max_slots,
	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
	.bc_destroy		= xprt_rdma_bc_destroy,
#endif
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
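
/* How this class is reached (a sketch of the generic sunrpc plumbing):
 * an RPC consumer such as the NFS client passes XPRT_TRANSPORT_RDMA in
 * struct xprt_create (e.g. for a mount with proto=rdma), and
 * xprt_create_transport() in net/sunrpc/xprt.c matches that ident
 * against this registered xprt_class and calls its ->setup method,
 * xprt_setup_rdma().
 */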

void xprt_rdma_cleanup(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xprt_rdma);
	xprt_unregister_transport(&xprt_rdma_bc);
}

int xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc)
		return rc;

	rc = xprt_register_transport(&xprt_rdma_bc);
	if (rc) {
		xprt_unregister_transport(&xprt_rdma);
		return rc;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}
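
/* Module wiring (context only; it lives outside this file): the rpcrdma
 * module's init and exit handlers in net/sunrpc/xprtrdma/module.c are
 * expected to call xprt_rdma_init() and xprt_rdma_cleanup() to register
 * and unregister these client-side transport classes.
 */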