// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

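/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): the call flow described above, reduced to a pseudo-sequence.
 * Only xprt_reserve(), xprt_transmit() and xprt_release() are real
 * entry points in this subsystem; the encode step is a stand-in for
 * the rpc_clnt state machine that actually drives these calls.
 *
 *	xprt_reserve(task);	// get a request slot, or sleep on backlog
 *	encode(task);		// marshal arguments into the request
 *	xprt_transmit(task);	// send it; task waits on xprt->pending
 *	...			// data_ready matches the XID, wakes task
 *	xprt_release(task);	// free the slot for the next caller
 */
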
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

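/*
 * Illustrative sketch (editorial addition): a transport module would
 * typically register an xprt_class like this from its module_init()
 * hook and unregister it on exit. "example_transport", its setup
 * callback, and the ident value are hypothetical.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= 249,			// made-up transport ident
 *		.setup	= example_xprt_setup,	// hypothetical constructor
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
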
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

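/*
 * Illustrative sketch (editorial addition): a caller that needs a
 * transport which may not yet be loaded asks for it by name, e.g.
 * "rdma". Because the module name is formed as "xprt%s", transport
 * modules are expected to advertise a matching alias (for the RDMA
 * transport, MODULE_ALIAS("xprtrdma")).
 *
 *	if (xprt_load_transport("rdma") != 0)
 *		return -ENOENT;		// module not available
 */
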
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

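/*
 * Illustrative sketch (editorial addition): a transport picks its
 * locking discipline by wiring one of the two reserve helpers into
 * its rpc_xprt_ops. A datagram-style transport that wants the Van
 * Jacobson congestion window would use the _cong variants; a stream
 * transport would use xprt_reserve_xprt()/xprt_release_xprt(). Only
 * the two fields shown are assumed here.
 *
 *	static const struct rpc_xprt_ops example_dgram_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt_cong,
 *		.release_xprt	= xprt_release_xprt_cong,
 *		// ... remaining callbacks elided ...
 *	};
 */
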
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

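/*
 * Worked example (editorial addition), assuming RPC_CWNDSCALE = 256
 * and a current window of cwnd = 512 (two requests):
 *
 *	reply received:	cwnd += (256 * 256 + (512 >> 1)) / 512
 *			cwnd = 512 + 128 = 640	(additive increase)
 *	-ETIMEDOUT:	cwnd >>= 1
 *			cwnd = 256, i.e. a single request in flight
 *
 * The window grows by roughly RPC_CWNDSCALE/cwnd of a slot per reply
 * and is halved on a timeout: the classic AIMD behaviour of the 44BSD
 * congestion avoidance scheme mentioned above.
 */
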
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_minortimeo))
		return status;
	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

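/*
 * Worked example (editorial addition): with a linear timeout table of
 * to_initval = to_increment = 60s (a common TCP-style setting), the
 * waits between retransmits run 60s, 120s, 180s, ... until
 * rq_majortimeo expires; an exponential table doubles rq_timeout
 * instead. On major timeout, xprt_adjust_timeout() resets rq_timeout
 * to to_initval, re-arms rq_majortimeo, reinitializes the RTT
 * estimator ("slow start"), and returns -ETIMEDOUT so the caller can
 * decide whether to retry.
 */
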
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);

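/*
 * Illustrative sketch (editorial addition): a connect worker would
 * typically combine the two helpers above, roughly as the socket
 * transport does. "want_backoff" and "INIT_REEST_TO" are hypothetical
 * stand-ins for transport-specific state.
 *
 *	unsigned long delay = 0;
 *
 *	if (want_backoff) {
 *		delay = xprt_reconnect_delay(xprt);
 *		xprt_reconnect_backoff(xprt, INIT_REEST_TO);
 *	}
 *	queue_delayed_work(xprtiod_workqueue, &connect_worker, delay);
 *
 * Each failed attempt doubles reestablish_timeout (capped at
 * max_reconnect_timeout), while the next attempt is scheduled no
 * earlier than connect_start + reestablish_timeout.
 */
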
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

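/*
 * Illustrative sketch (editorial addition): the canonical receive-path
 * pattern for the lookup/pin helpers above. The reply handler holds
 * xprt->queue_lock only long enough to find and pin the request,
 * drops it for the (possibly long-running) data copy, then re-takes
 * it to complete. "copy_reply_data()" is a hypothetical stand-in for
 * transport-specific work.
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req) {
 *		spin_unlock(&xprt->queue_lock);
 *		return;
 *	}
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *
 *	copied = copy_reply_data(req);	// may sleep; req stays pinned
 *
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */
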
static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

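/*
 * Worked example (editorial addition): if the estimator currently
 * yields rpc_calc_rto() == 1s worth of jiffies for this procedure's
 * timer class, one backoff has been recorded (rpc_ntimeo() == 1) and
 * the request has been retried twice (rq_retries == 2), the task
 * sleeps for 1s << (1 + 2) = 8s, clamped to cl_timeout->to_maxval.
 * The wait still doubles per timeout like the static tables, but it
 * starts from a measured round-trip time rather than to_initval.
 */
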
/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
Trond Myklebust918f3c12018-09-09 11:37:22 -04001298 INIT_LIST_HEAD(&req->rq_xmit2);
Chuck Lever0c776682019-02-11 11:25:04 -05001299 trace_xprt_enq_xmit(task, 4);
Trond Myklebust918f3c12018-09-09 11:37:22 -04001300out:
Trond Myklebust944b0422018-08-09 23:33:21 -04001301 set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1302 spin_unlock(&xprt->queue_lock);
1303 }
1304}
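/*
 * Illustrative queue shapes (example only): with plain requests A and
 * B already on the xmit_queue, a request C carrying a congestion
 * credit (rq_cong) is inserted ahead of the first non-congestion
 * entry, giving
 *
 *	xmit_queue: C -> A -> B
 *
 * whereas a new request D owned by the same tk_owner as A is chained
 * off A via rq_xmit2 instead, so that both can be transmitted in a
 * single pass over the queue.
 */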

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}
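/*
 * Transports that need per-request setup hook ->prepare_request.
 * A hypothetical sketch (the function name is invented for
 * illustration):
 *
 *	static void example_prepare_request(struct rpc_rqst *req)
 *	{
 *		xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
 *	}
 *
 * The stream socket transport, for instance, uses this hook to set up
 * the receive buffer before the request goes on the wire.
 */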

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}
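/*
 * A note on connect_cookie: it is sampled before ->send_request() and
 * stored in rq_connect_cookie only once the send succeeds. The cookie
 * changes whenever the transport connection state changes, so
 * comparing a request's cookie against the transport's current value
 * (as the disconnect/retransmit logic does) reveals whether the
 * connection has been re-established since this request last went on
 * the wire.
 */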

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}
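/*
 * Note that the drain loop above makes whichever task holds the
 * transport's write lock transmit on behalf of everyone: if tasks A
 * and B are both queued and A acquires the lock first, A sends both
 * requests before dropping out, while B simply finds its
 * RPC_TASK_NEED_XMIT bit cleared and proceeds to wait for a reply.
 * This keeps the wire busy without each task taking a turn at the
 * lock.
 */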

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}
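/*
 * xprt_throttle_congested() is a classic check-lock-recheck pattern:
 * the first test_bit() avoids taking reserve_lock on the common
 * uncongested path, and the second test under the lock guards against
 * XPRT_CONGESTED being cleared between the unlocked check and the
 * rpc_sleep_on(). A miss on the unlocked test is harmless; the caller
 * simply falls through to the slot allocator.
 */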

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
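/*
 * Illustrative caller (a hypothetical transport, not part of this
 * file): a transport setup routine typically embeds struct rpc_xprt
 * at the head of its private structure and lets xprt_alloc() size the
 * whole thing and prefill the free slot list:
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;
 *		int		private_state;
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  16, RPC_MAX_SLOT_TABLE);
 *
 * Here 16 slots are preallocated (becoming min_reqs and num_reqs),
 * and the slot table may grow on demand up to RPC_MAX_SLOT_TABLE
 * (max_reqs).
 */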

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}
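/*
 * Taken together, these two helpers give each transport an XID stream
 * with a random starting point: if prandom_u32() happens to return
 * 0x1db7c5a0 (an example value), successive requests carry XIDs
 * 0x1db7c5a0, 0x1db7c5a1, 0x1db7c5a2, and so on. Randomizing the base
 * makes it unlikely that a freshly rebooted client reuses XIDs for
 * which the server may still hold cached replies.
 */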

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}
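/*
 * Typical use (a sketch; the RPC client code does this internally when
 * a client is created): fill in struct xprt_create and ask for a
 * registered transport by its ident, e.g. XPRT_TRANSPORT_TCP:
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *		.servername	= "server.example.com",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *
 * The matching struct xprt_class must have been registered beforehand
 * via xprt_register_transport().
 */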

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
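/*
 * Reference-counting sketch for callers that cache an xprt pointer
 * (illustrative only): take a reference while the pointer is stashed,
 * and drop it when done; the final xprt_put() schedules destruction.
 *
 *	struct rpc_xprt *cached = xprt_get(xprt);
 *	if (cached) {
 *		... use cached ...
 *		xprt_put(cached);
 *	}
 *
 * xprt_get() may return NULL if the last reference is already gone,
 * so the result must always be checked.
 */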