// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/smp.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;
static struct xprt_class xprt_rdma;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
static unsigned int dummy;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &dummy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

static const struct rpc_xprt_ops xprt_rdma_procs;

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

/**
 * xprt_rdma_connect_worker - establish connection in the background
 * @work: worker thread context
 *
 * Requester holds the xprt's send lock to prevent activity on this
 * transport while a fresh connection is being established. RPC tasks
 * sleep on the xprt's pending queue waiting for connect to complete.
 */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

	rc = rpcrdma_xprt_connect(r_xprt);
	xprt_clear_connecting(xprt);
	if (!rc) {
		xprt->connect_cookie++;
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies -
					   xprt->stat.connect_start;
		xprt_set_connected(xprt);
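		/* Waking pending RPCs with -EAGAIN prompts them to
		 * retransmit on the freshly established connection.
		 */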
		rc = -EAGAIN;
	} else
		rpcrdma_xprt_disconnect(r_xprt);
	xprt_unlock_connect(xprt, r_xprt);
	xprt_wake_pending_tasks(xprt, rc);
}

/**
 * xprt_rdma_inject_disconnect - inject a connection fault
 * @xprt: transport context
 *
 * If @xprt is connected, disconnect it to simulate spurious
 * connection loss. Caller must hold @xprt's send lock to
 * ensure that data structures and hardware resources are
 * stable during the rdma_disconnect() call.
 */
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_inject_dsc(r_xprt);
	rdma_disconnect(r_xprt->rx_ep->re_id);
}

/**
 * xprt_rdma_destroy - Full tear down of transport
 * @xprt: doomed transport context
 *
 * Caller guarantees there will be no more calls to us with
 * this @xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	rpcrdma_xprt_disconnect(r_xprt);
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);

	xprt_rdma_free_addresses(xprt);
	xprt_free(xprt);

	module_put(THIS_MODULE);
}

/* 60 second timeout, no retries */
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct sockaddr *sap;
	int rc;

	if (args->addrlen > sizeof(xprt->addr))
		return ERR_PTR(-EBADF);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-EIO);

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
			  xprt_rdma_slot_table_entries);
	if (!xprt) {
		module_put(THIS_MODULE);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->connect_timeout = xprt->timeout->to_initval;
	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */
	sap = args->dstaddr;

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->xprt_class = &xprt_rdma;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, sap, xprt->addrlen);

	if (rpc_get_port(sap))
		xprt_set_bound(xprt);
	xprt_rdma_format_addresses(xprt, sap);

	new_xprt = rpcx_to_rdmax(xprt);
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc) {
		xprt_rdma_free_addresses(xprt);
		xprt_free(xprt);
		module_put(THIS_MODULE);
		return ERR_PTR(rc);
	}

	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;

	return xprt;
}

/**
 * xprt_rdma_close - close a transport connection
 * @xprt: transport context
 *
 * Called during autoclose or device removal.
 *
 * Caller holds @xprt's send lock to prevent activity on this
 * transport while the connection is torn down.
 */
void xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	rpcrdma_xprt_disconnect(r_xprt);

	xprt->reestablish_timeout = 0;
	++xprt->connect_cookie;
	xprt_disconnect_done(xprt);
}

/**
 * xprt_rdma_set_port - update server port with rpcbind result
 * @xprt: controlling RPC transport
 * @port: new port value
 *
 * Transport connect status is unchanged.
 */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
	char buf[8];

	rpc_set_port(sap, port);

	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
	snprintf(buf, sizeof(buf), "%u", port);
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	snprintf(buf, sizeof(buf), "%4hx", port);
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

/**
 * xprt_rdma_timer - invoked when an RPC times out
 * @xprt: controlling RPC transport
 * @task: RPC task that timed out
 *
 * Invoked when the transport is still connected, but an RPC
 * retransmit timeout occurs.
 *
 * Since RDMA connections don't have a keep-alive, forcibly
 * disconnect and retry to connect. This drives full
 * detection of the network path, and retransmissions of
 * all pending RPCs.
 */
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_force_disconnect(xprt);
}

/**
 * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection
 * @xprt: controlling transport instance
 * @connect_timeout: reconnect timeout after client disconnects
 * @reconnect_timeout: reconnect timeout after server disconnects
 *
 */
static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
					  unsigned long connect_timeout,
					  unsigned long reconnect_timeout)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_set_cto(r_xprt, connect_timeout, reconnect_timeout);

	spin_lock(&xprt->transport_lock);

	if (connect_timeout < xprt->connect_timeout) {
		struct rpc_timeout to;
		unsigned long initval;

		to = *xprt->timeout;
		initval = connect_timeout;
		if (initval < RPCRDMA_INIT_REEST_TO << 1)
			initval = RPCRDMA_INIT_REEST_TO << 1;
		to.to_initval = initval;
		to.to_maxval = initval;
		r_xprt->rx_timeout = to;
		xprt->timeout = &r_xprt->rx_timeout;
		xprt->connect_timeout = connect_timeout;
	}

	if (reconnect_timeout < xprt->max_reconnect_timeout)
		xprt->max_reconnect_timeout = reconnect_timeout;

	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_rdma_connect - schedule an attempt to reconnect
 * @xprt: transport state
 * @task: RPC scheduler context (unused)
 *
 */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned long delay;

	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));

	delay = 0;
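	/* Delay the attempt only if a connection has been tried on
	 * this transport before; a new transport connects right away.
	 */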
	if (ep && ep->re_connect_status != 0) {
		delay = xprt_reconnect_delay(xprt);
		xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
	}
	trace_xprtrdma_op_connect(r_xprt, delay);
	queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
			   delay);
}

/**
 * xprt_rdma_alloc_slot - allocate an rpc_rqst
 * @xprt: controlling RPC transport
 * @task: RPC task requesting a fresh rpc_rqst
 *
 * tk_status values:
 *	%0 if task->tk_rqstp points to a fresh rpc_rqst
 *	%-EAGAIN if no rpc_rqst is available; queued on backlog
 */
static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (!req)
		goto out_sleep;
	task->tk_rqstp = &req->rl_slot;
	task->tk_status = 0;
	return;

out_sleep:
	task->tk_status = -EAGAIN;
	xprt_add_backlog(xprt, task);
}

/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(xprt, struct rpcrdma_xprt, rx_xprt);

	rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
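	/* If an rpc_task is waiting on the backlog queue, it takes
	 * over this rqst; otherwise return the rpcrdma_req to the
	 * transport's buffer pool.
	 */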
	if (!xprt_wake_up_backlog(xprt, rqst)) {
		memset(rqst, 0, sizeof(*rqst));
		rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
	}
}

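/* Ensure that regbuf @rb can hold at least @size bytes, reallocating
 * the underlying buffer if it is too small.
 */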
static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
				 struct rpcrdma_regbuf *rb, size_t size,
				 gfp_t flags)
{
	if (unlikely(rdmab_length(rb) < size)) {
		if (!rpcrdma_regbuf_realloc(rb, size, flags))
			return false;
		r_xprt->rx_stats.hardway_register_count += size;
	}
	return true;
}

/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *	0:	Success; rq_buffer points to RPC buffer to use
 *	ENOMEM:	Out of memory, call again later
 *	EIO:	A permanent error occurred, do not retry
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	gfp_t flags;

	flags = RPCRDMA_DEF_GFP;
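	/* A swapper task must not sleep waiting for memory and may
	 * need to dip into emergency reserves.
	 */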
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
				  flags))
		goto out_fail;
	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
				  flags))
		goto out_fail;

	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
	rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
	return 0;

out_fail:
	return -ENOMEM;
}

/**
 * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
 * @task: RPC task
 *
 * Caller guarantees rqst->rq_buffer is non-NULL.
 */
static void
xprt_rdma_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	if (unlikely(!list_empty(&req->rl_registered))) {
		trace_xprtrdma_mrs_zap(task);
		frwr_unmap_sync(rpcx_to_rdmax(rqst->rq_xprt), req);
	}

	/* XXX: If the RPC is completing because of a signal and
	 * not because a reply was received, we ought to ensure
	 * that the Send completion has fired, so that memory
	 * involved with the Send is not still visible to the NIC.
	 */
}

/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @rqst: RPC message in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EAGAIN if the caller should call again
 *	%-ENOBUFS if the caller should call again after a delay
 *	%-EMSGSIZE if encoding ran out of buffer space. The request
 *		   was not sent. Do not try to send this message again.
 *	%-EIO if an I/O error occurred. The request was not sent.
 *	      Do not try to send this message again.
 */
static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (unlikely(!rqst->rq_buffer))
		return xprt_rdma_bc_send_reply(rqst);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_marshal_req(r_xprt, rqst);
	if (rc < 0)
		goto failed_marshal;

	/* Must suppress retransmit to maintain credits */
	if (rqst->rq_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	rqst->rq_xtime = ktime_get();

	if (frwr_send(r_xprt, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;

	/* An RPC with no reply will throw off credit accounting,
	 * so drop the connection to reset the credit grant.
	 */
	if (!rpc_reply_expected(rqst->rq_task))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_puts(seq, "\txprt:\trdma ");
	seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
		   0,	/* need a local port? */
		   xprt->stat.bind_count,
		   xprt->stat.connect_count,
		   xprt->stat.connect_time / HZ,
		   idle_time,
		   xprt->stat.sends,
		   xprt->stat.recvs,
		   xprt->stat.bad_xids,
		   xprt->stat.req_u,
		   xprt->stat.bklog_u);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
		   r_xprt->rx_stats.read_chunk_count,
		   r_xprt->rx_stats.write_chunk_count,
		   r_xprt->rx_stats.reply_chunk_count,
		   r_xprt->rx_stats.total_rdma_request,
		   r_xprt->rx_stats.total_rdma_reply,
		   r_xprt->rx_stats.pullup_copy_count,
		   r_xprt->rx_stats.fixup_copy_count,
		   r_xprt->rx_stats.hardway_register_count,
		   r_xprt->rx_stats.failed_marshal_count,
		   r_xprt->rx_stats.bad_reply_count,
		   r_xprt->rx_stats.nomsg_call_count);
	seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
		   r_xprt->rx_stats.mrs_recycled,
		   r_xprt->rx_stats.mrs_orphaned,
		   r_xprt->rx_stats.mrs_allocated,
		   r_xprt->rx_stats.local_inv_needed,
		   r_xprt->rx_stats.empty_sendctx_q,
		   r_xprt->rx_stats.reply_waits_for_send);
}

static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static const struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_rdma_alloc_slot,
	.free_slot		= xprt_rdma_free_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.wait_for_reply_request	= xprt_wait_for_reply_request_def, /* ditto */
	.timer			= xprt_rdma_timer,
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.set_connect_timeout	= xprt_rdma_set_connect_timeout,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	.bc_setup		= xprt_rdma_bc_setup,
	.bc_maxpayload		= xprt_rdma_bc_maxpayload,
	.bc_num_slots		= xprt_rdma_bc_max_slots,
	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
	.bc_destroy		= xprt_rdma_bc_destroy,
#endif
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
	.netid			= { "rdma", "rdma6", "" },
};

void xprt_rdma_cleanup(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xprt_rdma);
	xprt_unregister_transport(&xprt_rdma_bc);
}

int xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc)
		return rc;

	rc = xprt_register_transport(&xprt_rdma_bc);
	if (rc) {
		xprt_unregister_transport(&xprt_rdma);
		return rc;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}