// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
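
/*
 * Editorial sketch (not part of the original file): a transport module
 * typically registers its xprt_class from its module init hook and
 * unregisters it again on exit.  All identifiers below are hypothetical;
 * real transports also fill in the fields their setup callback needs.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,
 *		.setup	= example_setup_xprt,
 *	};
 *
 *	static int __init example_transport_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_transport_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */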

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_load_transport - load a transport implementation
 * @netid: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *netid)
{
	const struct xprt_class *t;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	xprt_class_release(t);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
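
/*
 * Editorial note (not part of the original file): callers pass the rpcbind
 * netid of the transport they need.  If no matching class is registered,
 * xprt_class_find_by_netid() above asks the module loader for a module
 * aliased "rpc<netid>" and then retries the lookup.  A hypothetical caller:
 *
 *	if (xprt_load_transport("tcp") != 0)
 *		return -EPROTONOSUPPORT;
 */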

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
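
/*
 * Editorial note (not in the original source): for an exponential timeout
 * (to_exponential set) the major timeout is the per-request timeout shifted
 * left by the retry count; for example, an initial timeout of 5 seconds
 * with 3 retries yields 5 << 3 = 40 seconds before the request is treated
 * as having majorly timed out.  For a linear timeout it is rq_timeout plus
 * to_increment for each retry.  Either way the result is clamped to
 * to_maxval.
 */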

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_minortimeo))
		return status;
	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
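
/*
 * Editorial note (not in the original source): together these two helpers
 * implement a simple exponential backoff for connection re-establishment.
 * For example, a reestablish_timeout that starts at, say, 3 seconds doubles
 * to 6, 12, 24, ... seconds on successive calls to xprt_reconnect_backoff(),
 * but is capped at max_reconnect_timeout and never drops below the
 * caller-supplied init_to value.
 */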
963
Trond Myklebust95f76912018-09-07 08:35:22 -0400964enum xprt_xid_rb_cmp {
965 XID_RB_EQUAL,
966 XID_RB_LEFT,
967 XID_RB_RIGHT,
968};
969static enum xprt_xid_rb_cmp
970xprt_xid_cmp(__be32 xid1, __be32 xid2)
971{
972 if (xid1 == xid2)
973 return XID_RB_EQUAL;
974 if ((__force u32)xid1 < (__force u32)xid2)
975 return XID_RB_LEFT;
976 return XID_RB_RIGHT;
977}
978
979static struct rpc_rqst *
980xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
981{
982 struct rb_node *n = xprt->recv_queue.rb_node;
983 struct rpc_rqst *req;
984
985 while (n != NULL) {
986 req = rb_entry(n, struct rpc_rqst, rq_recv);
987 switch (xprt_xid_cmp(xid, req->rq_xid)) {
988 case XID_RB_LEFT:
989 n = n->rb_left;
990 break;
991 case XID_RB_RIGHT:
992 n = n->rb_right;
993 break;
994 case XID_RB_EQUAL:
995 return req;
996 }
997 }
998 return NULL;
999}
1000
1001static void
1002xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
1003{
1004 struct rb_node **p = &xprt->recv_queue.rb_node;
1005 struct rb_node *n = NULL;
1006 struct rpc_rqst *req;
1007
1008 while (*p != NULL) {
1009 n = *p;
1010 req = rb_entry(n, struct rpc_rqst, rq_recv);
1011 switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
1012 case XID_RB_LEFT:
1013 p = &n->rb_left;
1014 break;
1015 case XID_RB_RIGHT:
1016 p = &n->rb_right;
1017 break;
1018 case XID_RB_EQUAL:
1019 WARN_ON_ONCE(new != req);
1020 return;
1021 }
1022 }
1023 rb_link_node(&new->rq_recv, n, p);
1024 rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1025}
1026
1027static void
1028xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1029{
1030 rb_erase(&req->rq_recv, &xprt->recv_queue);
1031}
1032
Chuck Lever9903cd12005-08-11 16:25:26 -04001033/**
1034 * xprt_lookup_rqst - find an RPC request corresponding to an XID
1035 * @xprt: transport on which the original request was transmitted
1036 * @xid: RPC XID of incoming reply
1037 *
Trond Myklebust75c84152018-08-31 10:21:00 -04001038 * Caller holds xprt->queue_lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039 */
Alexey Dobriyand8ed0292006-09-26 22:29:38 -07001040struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041{
Pavel Emelyanov8f3a6de2010-10-05 23:30:19 +04001042 struct rpc_rqst *entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043
Trond Myklebust95f76912018-09-07 08:35:22 -04001044 entry = xprt_request_rb_find(xprt, xid);
1045 if (entry != NULL) {
1046 trace_xprt_lookup_rqst(xprt, xid, 0);
1047 entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
1048 return entry;
1049 }
Chuck Lever46121cf2007-01-31 12:14:08 -05001050
1051 dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
1052 ntohl(xid));
Jeff Layton3705ad62014-10-28 14:24:13 -04001053 trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
Chuck Lever262ca072006-03-20 13:44:16 -05001054 xprt->stat.bad_xids++;
1055 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056}
\"Talpey, Thomas\12444802007-09-10 13:45:36 -04001057EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058
Trond Myklebustcf9946c2018-08-06 12:55:34 -04001059static bool
1060xprt_is_pinned_rqst(struct rpc_rqst *req)
1061{
1062 return atomic_read(&req->rq_pin) != 0;
1063}
1064
Trond Myklebust729749b2017-08-13 10:03:59 -04001065/**
1066 * xprt_pin_rqst - Pin a request on the transport receive list
1067 * @req: Request to pin
1068 *
1069 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
Chuck Lever1f7d1c72019-04-24 09:40:09 -04001070 * so should be holding xprt->queue_lock.
Trond Myklebust729749b2017-08-13 10:03:59 -04001071 */
1072void xprt_pin_rqst(struct rpc_rqst *req)
1073{
Trond Myklebustcf9946c2018-08-06 12:55:34 -04001074 atomic_inc(&req->rq_pin);
Trond Myklebust729749b2017-08-13 10:03:59 -04001075}
Chuck Lever9590d082017-08-23 17:05:58 -04001076EXPORT_SYMBOL_GPL(xprt_pin_rqst);
Trond Myklebust729749b2017-08-13 10:03:59 -04001077
1078/**
1079 * xprt_unpin_rqst - Unpin a request on the transport receive list
1080 * @req: Request to pin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
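
/*
 * Illustrative sketch (not part of the original file): a transport's receive
 * path typically pairs these helpers with xprt_lookup_rqst() so that the
 * request cannot be released while reply data is being copied into it,
 * without having to hold xprt->queue_lock for the whole copy.  The copy
 * helper named below is hypothetical.
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req)
 *		goto out_unlock;
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *
 *	copied = example_copy_reply_data(req);
 *
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */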

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
1116 * @task: RPC task
1117 *
1118 */
1119void
1120xprt_request_enqueue_receive(struct rpc_task *task)
1121{
1122 struct rpc_rqst *req = task->tk_rqstp;
1123 struct rpc_xprt *xprt = req->rq_xprt;
1124
1125 if (!xprt_request_need_enqueue_receive(task, req))
1126 return;
Trond Myklebust75369082019-07-17 21:22:38 -04001127
1128 xprt_request_prepare(task->tk_rqstp);
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001129 spin_lock(&xprt->queue_lock);
1130
1131 /* Update the softirq receive buffer */
1132 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1133 sizeof(req->rq_private_buf));
1134
1135 /* Add request to the receive list */
Trond Myklebust95f76912018-09-07 08:35:22 -04001136 xprt_request_rb_insert(xprt, req);
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001137 set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1138 spin_unlock(&xprt->queue_lock);
1139
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001140 /* Turn off autodisconnect */
1141 del_singleshot_timer_sync(&xprt->timer);
1142}
1143
1144/**
1145 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1146 * @task: RPC task
1147 *
1148 * Caller must hold xprt->queue_lock.
1149 */
1150static void
1151xprt_request_dequeue_receive_locked(struct rpc_task *task)
1152{
Trond Myklebust95f76912018-09-07 08:35:22 -04001153 struct rpc_rqst *req = task->tk_rqstp;
1154
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001155 if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
Trond Myklebust95f76912018-09-07 08:35:22 -04001156 xprt_request_rb_remove(req->rq_xprt, req);
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001157}
1158
Chuck Leverecd465e2018-03-05 15:12:57 -05001159/**
1160 * xprt_update_rtt - Update RPC RTT statistics
1161 * @task: RPC request that recently completed
1162 *
Trond Myklebust75c84152018-08-31 10:21:00 -04001163 * Caller holds xprt->queue_lock.
Chuck Leverecd465e2018-03-05 15:12:57 -05001164 */
1165void xprt_update_rtt(struct rpc_task *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166{
Chuck Lever1570c1e2005-08-25 16:25:52 -07001167 struct rpc_rqst *req = task->tk_rqstp;
1168 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
Eric Dumazet95c96172012-04-15 05:58:06 +00001169 unsigned int timer = task->tk_msg.rpc_proc->p_timer;
Trond Myklebustd60dbb22010-05-13 12:51:49 -04001170 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171
Chuck Lever1570c1e2005-08-25 16:25:52 -07001172 if (timer) {
1173 if (req->rq_ntrans == 1)
Chuck Leverff839972010-05-07 13:34:47 -04001174 rpc_update_rtt(rtt, timer, m);
Chuck Lever1570c1e2005-08-25 16:25:52 -07001175 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 }
Chuck Lever1570c1e2005-08-25 16:25:52 -07001177}
Chuck Leverecd465e2018-03-05 15:12:57 -05001178EXPORT_SYMBOL_GPL(xprt_update_rtt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Chuck Lever1570c1e2005-08-25 16:25:52 -07001180/**
1181 * xprt_complete_rqst - called when reply processing is complete
1182 * @task: RPC request that recently completed
1183 * @copied: actual number of bytes received from the transport
1184 *
Trond Myklebust75c84152018-08-31 10:21:00 -04001185 * Caller holds xprt->queue_lock.
Chuck Lever1570c1e2005-08-25 16:25:52 -07001186 */
1187void xprt_complete_rqst(struct rpc_task *task, int copied)
1188{
1189 struct rpc_rqst *req = task->tk_rqstp;
Trond Myklebustfda13932008-02-22 16:34:12 -05001190 struct rpc_xprt *xprt = req->rq_xprt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Trond Myklebustfda13932008-02-22 16:34:12 -05001192 xprt->stat.recvs++;
Chuck Leveref759a22006-03-20 13:44:17 -05001193
Trond Myklebust1e799b62008-03-21 16:19:41 -04001194 req->rq_private_buf.len = copied;
Ricardo Labiagadd2b63d2009-04-01 09:23:28 -04001195 /* Ensure all writes are done before we update */
1196 /* req->rq_reply_bytes_recvd */
Trond Myklebust43ac3f22006-03-20 13:44:51 -05001197 smp_wmb();
Ricardo Labiagadd2b63d2009-04-01 09:23:28 -04001198 req->rq_reply_bytes_recvd = copied;
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001199 xprt_request_dequeue_receive_locked(task);
Trond Myklebustfda13932008-02-22 16:34:12 -05001200 rpc_wake_up_queued_task(&xprt->pending, task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201}
\"Talpey, Thomas\12444802007-09-10 13:45:36 -04001202EXPORT_SYMBOL_GPL(xprt_complete_rqst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203
Chuck Lever46c0ee82005-08-25 16:25:52 -07001204static void xprt_timer(struct rpc_task *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205{
Chuck Lever46c0ee82005-08-25 16:25:52 -07001206 struct rpc_rqst *req = task->tk_rqstp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 struct rpc_xprt *xprt = req->rq_xprt;
1208
Trond Myklebust5d008372008-02-22 16:34:17 -05001209 if (task->tk_status != -ETIMEDOUT)
1210 return;
Chuck Lever46c0ee82005-08-25 16:25:52 -07001211
Chuck Lever82476d92018-01-03 15:38:25 -05001212 trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
Ricardo Labiagadd2b63d2009-04-01 09:23:28 -04001213 if (!req->rq_reply_bytes_recvd) {
Chuck Lever46c0ee82005-08-25 16:25:52 -07001214 if (xprt->ops->timer)
Trond Myklebust6a24dfb2013-01-08 09:48:15 -05001215 xprt->ops->timer(xprt, task);
Trond Myklebust5d008372008-02-22 16:34:17 -05001216 } else
1217 task->tk_status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218}
1219
Chuck Lever9903cd12005-08-11 16:25:26 -04001220/**
Trond Myklebust8ba6a922019-04-07 13:58:46 -04001221 * xprt_wait_for_reply_request_def - wait for reply
1222 * @task: pointer to rpc_task
1223 *
1224 * Set a request's retransmit timeout based on the transport's
1225 * default timeout parameters. Used by transports that don't adjust
1226 * the retransmit timeout based on round-trip time estimation,
1227 * and put the task to sleep on the pending queue.
1228 */
1229void xprt_wait_for_reply_request_def(struct rpc_task *task)
1230{
1231 struct rpc_rqst *req = task->tk_rqstp;
1232
Trond Myklebust6b2e6852019-04-07 13:58:49 -04001233 rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
Trond Myklebust9e910bf2019-04-07 13:58:53 -04001234 xprt_request_timeout(req));
Trond Myklebust8ba6a922019-04-07 13:58:46 -04001235}
1236EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1237
1238/**
1239 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1240 * @task: pointer to rpc_task
1241 *
1242 * Set a request's retransmit timeout using the RTT estimator,
1243 * and put the task to sleep on the pending queue.
1244 */
1245void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1246{
1247 int timer = task->tk_msg.rpc_proc->p_timer;
1248 struct rpc_clnt *clnt = task->tk_client;
1249 struct rpc_rtt *rtt = clnt->cl_rtt;
1250 struct rpc_rqst *req = task->tk_rqstp;
1251 unsigned long max_timeout = clnt->cl_timeout->to_maxval;
Trond Myklebust6b2e6852019-04-07 13:58:49 -04001252 unsigned long timeout;
Trond Myklebust8ba6a922019-04-07 13:58:46 -04001253
Trond Myklebust6b2e6852019-04-07 13:58:49 -04001254 timeout = rpc_calc_rto(rtt, timer);
1255 timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1256 if (timeout > max_timeout || timeout == 0)
1257 timeout = max_timeout;
1258 rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1259 jiffies + timeout);
Trond Myklebust8ba6a922019-04-07 13:58:46 -04001260}
1261EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1262
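/*
 * The timeout picked by xprt_wait_for_reply_request_rtt() is the estimated
 * RTO shifted left once per recorded timeout plus once per retransmission of
 * this request, clamped to the client's to_maxval (the "== 0" test also
 * catches a shift that overflowed to zero).  The stand-alone sketch below
 * walks through that arithmetic with made-up numbers; demo_rtt_timeout() and
 * the values used are hypothetical, not taken from this file.
 */
#include <stdio.h>

static unsigned long demo_rtt_timeout(unsigned long rto, unsigned int ntimeo,
				      unsigned int retries, unsigned long maxval)
{
	unsigned long timeout = rto << (ntimeo + retries);

	if (timeout > maxval || timeout == 0)	/* clamp, and catch overflow */
		timeout = maxval;
	return timeout;
}

int main(void)
{
	/* 200 << (1 + 2) = 1600 jiffies, still under a 6000-jiffy cap */
	printf("%lu\n", demo_rtt_timeout(200, 1, 2, 6000));
	/* 200 << (3 + 3) = 12800 jiffies, clamped to 6000 */
	printf("%lu\n", demo_rtt_timeout(200, 3, 3, 6000));
	return 0;
}
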
1263/**
Trond Myklebust7f3a1d12018-08-23 00:03:43 -04001264 * xprt_request_wait_receive - wait for the reply to an RPC request
 1265 * @task: RPC task waiting for the reply to the request it just sent
1266 *
1267 */
1268void xprt_request_wait_receive(struct rpc_task *task)
1269{
1270 struct rpc_rqst *req = task->tk_rqstp;
1271 struct rpc_xprt *xprt = req->rq_xprt;
1272
1273 if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1274 return;
1275 /*
1276 * Sleep on the pending queue if we're expecting a reply.
1277 * The spinlock ensures atomicity between the test of
1278 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1279 */
1280 spin_lock(&xprt->queue_lock);
1281 if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
Trond Myklebust8ba6a922019-04-07 13:58:46 -04001282 xprt->ops->wait_for_reply_request(task);
Trond Myklebust7f3a1d12018-08-23 00:03:43 -04001283 /*
1284 * Send an extra queue wakeup call if the
1285 * connection was dropped in case the call to
1286 * rpc_sleep_on() raced.
1287 */
1288 if (xprt_request_retransmit_after_disconnect(task))
1289 rpc_wake_up_queued_task_set_status(&xprt->pending,
1290 task, -ENOTCONN);
1291 }
1292 spin_unlock(&xprt->queue_lock);
1293}
1294
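/*
 * xprt_request_wait_receive() checks RPC_TASK_NEED_RECV twice: a lockless
 * fast path, then a second check under xprt->queue_lock so the decision to
 * sleep cannot race with the reply handler clearing the bit and waking the
 * pending queue.  A condensed user-space sketch of that check/lock/re-check
 * shape follows; demo_waiter and friends are hypothetical names, and a
 * pthread condition variable stands in for the RPC wait queue.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct demo_waiter {
	pthread_mutex_t lock;		/* stands in for xprt->queue_lock */
	pthread_cond_t cond;
	atomic_bool need_recv;		/* stands in for RPC_TASK_NEED_RECV */
};

static void demo_wait_receive(struct demo_waiter *w)
{
	if (!atomic_load(&w->need_recv))	/* lockless fast path */
		return;
	pthread_mutex_lock(&w->lock);
	while (atomic_load(&w->need_recv))	/* re-check under the lock */
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
}

static void demo_complete(struct demo_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	atomic_store(&w->need_recv, false);	/* like xprt_complete_rqst() */
	pthread_cond_broadcast(&w->cond);	/* like rpc_wake_up_queued_task() */
	pthread_mutex_unlock(&w->lock);
}
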
Trond Myklebust944b0422018-08-09 23:33:21 -04001295static bool
Trond Myklebust944b0422018-08-09 23:33:21 -04001296xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1297{
Trond Myklebust762e4e62018-08-24 16:28:28 -04001298 return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
Trond Myklebust944b0422018-08-09 23:33:21 -04001299}
1300
1301/**
1302 * xprt_request_enqueue_transmit - queue a task for transmission
1303 * @task: pointer to rpc_task
1304 *
1305 * Add a task to the transmission queue.
1306 */
1307void
1308xprt_request_enqueue_transmit(struct rpc_task *task)
1309{
Trond Myklebust918f3c12018-09-09 11:37:22 -04001310 struct rpc_rqst *pos, *req = task->tk_rqstp;
Trond Myklebust944b0422018-08-09 23:33:21 -04001311 struct rpc_xprt *xprt = req->rq_xprt;
1312
1313 if (xprt_request_need_enqueue_transmit(task, req)) {
Trond Myklebuste66721f2019-01-02 17:53:10 -05001314 req->rq_bytes_sent = 0;
Trond Myklebust944b0422018-08-09 23:33:21 -04001315 spin_lock(&xprt->queue_lock);
Trond Myklebust75891f52018-09-03 17:37:36 -04001316 /*
1317 * Requests that carry congestion control credits are added
1318 * to the head of the list to avoid starvation issues.
1319 */
1320 if (req->rq_cong) {
1321 xprt_clear_congestion_window_wait(xprt);
1322 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1323 if (pos->rq_cong)
1324 continue;
1325 /* Note: req is added _before_ pos */
1326 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1327 INIT_LIST_HEAD(&req->rq_xmit2);
1328 goto out;
1329 }
Trond Myklebust86aeee02018-09-08 14:22:41 -04001330 } else if (RPC_IS_SWAPPER(task)) {
1331 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1332 if (pos->rq_cong || pos->rq_bytes_sent)
1333 continue;
1334 if (RPC_IS_SWAPPER(pos->rq_task))
1335 continue;
1336 /* Note: req is added _before_ pos */
1337 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1338 INIT_LIST_HEAD(&req->rq_xmit2);
1339 goto out;
1340 }
Chuck Leverdeaa5c92019-01-09 10:04:57 -05001341 } else if (!req->rq_seqno) {
Trond Myklebust75891f52018-09-03 17:37:36 -04001342 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1343 if (pos->rq_task->tk_owner != task->tk_owner)
1344 continue;
1345 list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1346 INIT_LIST_HEAD(&req->rq_xmit);
1347 goto out;
1348 }
Trond Myklebust918f3c12018-09-09 11:37:22 -04001349 }
Trond Myklebust944b0422018-08-09 23:33:21 -04001350 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
Trond Myklebust918f3c12018-09-09 11:37:22 -04001351 INIT_LIST_HEAD(&req->rq_xmit2);
1352out:
Trond Myklebust944b0422018-08-09 23:33:21 -04001353 set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1354 spin_unlock(&xprt->queue_lock);
1355 }
1356}
1357
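/*
 * xprt_request_enqueue_transmit() keeps the transmit queue ordered by class:
 * requests holding congestion-control credits are placed ahead of requests
 * that do not, and requests from the same task owner are chained on the
 * rq_xmit2 sub-list so they go out back to back.  The stand-alone sketch
 * below shows only the "credited requests first" rule on a plain singly
 * linked list; demo_req and demo_enqueue are hypothetical names.
 */
#include <stdbool.h>

struct demo_req {
	bool has_cong_credit;
	struct demo_req *next;
};

/* Insert @req before the first queued entry without a credit, else at tail. */
static void demo_enqueue(struct demo_req **head, struct demo_req *req)
{
	struct demo_req **pos = head;

	if (req->has_cong_credit)
		while (*pos && (*pos)->has_cong_credit)
			pos = &(*pos)->next;	/* skip other credited entries */
	else
		while (*pos)
			pos = &(*pos)->next;	/* plain requests go to the tail */
	req->next = *pos;
	*pos = req;
}
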
1358/**
1359 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1360 * @task: pointer to rpc_task
1361 *
1362 * Remove a task from the transmission queue
1363 * Caller must hold xprt->queue_lock
1364 */
1365static void
1366xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1367{
Trond Myklebust918f3c12018-09-09 11:37:22 -04001368 struct rpc_rqst *req = task->tk_rqstp;
1369
1370 if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1371 return;
1372 if (!list_empty(&req->rq_xmit)) {
1373 list_del(&req->rq_xmit);
1374 if (!list_empty(&req->rq_xmit2)) {
1375 struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1376 struct rpc_rqst, rq_xmit2);
1377 list_del(&req->rq_xmit2);
1378 list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1379 }
1380 } else
1381 list_del(&req->rq_xmit2);
Trond Myklebust944b0422018-08-09 23:33:21 -04001382}
1383
1384/**
1385 * xprt_request_dequeue_transmit - remove a task from the transmission queue
1386 * @task: pointer to rpc_task
1387 *
1388 * Remove a task from the transmission queue
1389 */
1390static void
1391xprt_request_dequeue_transmit(struct rpc_task *task)
1392{
1393 struct rpc_rqst *req = task->tk_rqstp;
1394 struct rpc_xprt *xprt = req->rq_xprt;
1395
1396 spin_lock(&xprt->queue_lock);
1397 xprt_request_dequeue_transmit_locked(task);
1398 spin_unlock(&xprt->queue_lock);
1399}
1400
Trond Myklebust7f3a1d12018-08-23 00:03:43 -04001401/**
Trond Myklebustcc204d02019-09-10 13:01:35 -04001402 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1403 * @task: pointer to rpc_task
1404 *
1405 * Remove a task from the transmit and receive queues, and ensure that
1406 * it is not pinned by the receive work item.
1407 */
1408void
1409xprt_request_dequeue_xprt(struct rpc_task *task)
1410{
1411 struct rpc_rqst *req = task->tk_rqstp;
1412 struct rpc_xprt *xprt = req->rq_xprt;
1413
1414 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1415 test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1416 xprt_is_pinned_rqst(req)) {
1417 spin_lock(&xprt->queue_lock);
1418 xprt_request_dequeue_transmit_locked(task);
1419 xprt_request_dequeue_receive_locked(task);
1420 while (xprt_is_pinned_rqst(req)) {
1421 set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1422 spin_unlock(&xprt->queue_lock);
1423 xprt_wait_on_pinned_rqst(req);
1424 spin_lock(&xprt->queue_lock);
1425 clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1426 }
1427 spin_unlock(&xprt->queue_lock);
1428 }
1429}
1430
1431/**
Trond Myklebust9d96acb2018-09-13 12:22:04 -04001432 * xprt_request_prepare - prepare an encoded request for transport
1433 * @req: pointer to rpc_rqst
1434 *
1435 * Calls into the transport layer to do whatever is needed to prepare
 1436 * the request for transmission or reception.
1437 */
1438void
1439xprt_request_prepare(struct rpc_rqst *req)
1440{
1441 struct rpc_xprt *xprt = req->rq_xprt;
1442
1443 if (xprt->ops->prepare_request)
1444 xprt->ops->prepare_request(req);
1445}
1446
1447/**
Trond Myklebust762e4e62018-08-24 16:28:28 -04001448 * xprt_request_need_retransmit - Test if a task needs retransmission
1449 * @task: pointer to rpc_task
1450 *
1451 * Test for whether a connection breakage requires the task to retransmit
1452 */
1453bool
1454xprt_request_need_retransmit(struct rpc_task *task)
1455{
1456 return xprt_request_retransmit_after_disconnect(task);
1457}
1458
1459/**
Chuck Lever9903cd12005-08-11 16:25:26 -04001460 * xprt_prepare_transmit - reserve the transport before sending a request
1461 * @task: RPC task about to send a request
1462 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 */
Trond Myklebust90051ea2013-09-25 12:17:18 -04001464bool xprt_prepare_transmit(struct rpc_task *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465{
1466 struct rpc_rqst *req = task->tk_rqstp;
1467 struct rpc_xprt *xprt = req->rq_xprt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
Trond Myklebust5f2f6bd2018-09-01 14:25:24 -04001469 if (!xprt_lock_write(xprt, task)) {
Chuck Lever9ce07ae2020-07-08 16:09:26 -04001470 trace_xprt_transmit_queued(xprt, task);
1471
Trond Myklebust5f2f6bd2018-09-01 14:25:24 -04001472 /* Race breaker: someone may have transmitted us */
Trond Myklebust944b0422018-08-09 23:33:21 -04001473 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
Trond Myklebust5f2f6bd2018-09-01 14:25:24 -04001474 rpc_wake_up_queued_task_set_status(&xprt->sending,
1475 task, 0);
1476 return false;
1477
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 }
Trond Myklebust5f2f6bd2018-09-01 14:25:24 -04001479 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480}
1481
Trond Myklebuste0ab53d2006-07-27 17:22:50 -04001482void xprt_end_transmit(struct rpc_task *task)
Trond Myklebust5e5ce5b2005-10-18 14:20:11 -07001483{
Rahul Iyer343952f2009-04-01 09:23:17 -04001484 xprt_release_write(task->tk_rqstp->rq_xprt, task);
Trond Myklebust5e5ce5b2005-10-18 14:20:11 -07001485}
1486
Chuck Lever9903cd12005-08-11 16:25:26 -04001487/**
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001488 * xprt_request_transmit - send an RPC request on a transport
1489 * @req: pointer to request to transmit
1490 * @snd_task: RPC task that owns the transport lock
Chuck Lever9903cd12005-08-11 16:25:26 -04001491 *
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001492 * This performs the transmission of a single request.
1493 * Note that if the request is not the same as snd_task, then it
1494 * does need to be pinned.
1495 * Returns '0' on success.
Chuck Lever9903cd12005-08-11 16:25:26 -04001496 */
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001497static int
1498xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499{
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001500 struct rpc_xprt *xprt = req->rq_xprt;
1501 struct rpc_task *task = req->rq_task;
Trond Myklebust90d91b02017-12-14 21:24:08 -05001502 unsigned int connect_cookie;
Trond Myklebustdcbbeda2018-09-01 14:29:18 -04001503 int is_retrans = RPC_WAS_SENT(task);
Chuck Leverff699ea82018-03-05 15:13:13 -05001504 int status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001506 if (!req->rq_bytes_sent) {
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001507 if (xprt_request_data_received(task)) {
1508 status = 0;
Trond Myklebust944b0422018-08-09 23:33:21 -04001509 goto out_dequeue;
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001510 }
Trond Myklebust3021a5bb2018-08-14 13:50:21 -04001511 /* Verify that our message lies in the RPCSEC_GSS window */
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001512 if (rpcauth_xmit_need_reencode(task)) {
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001513 status = -EBADMSG;
Trond Myklebust944b0422018-08-09 23:33:21 -04001514 goto out_dequeue;
Trond Myklebust3021a5bb2018-08-14 13:50:21 -04001515 }
Trond Myklebustae67bd32019-04-07 13:58:44 -04001516 if (RPC_SIGNALLED(task)) {
1517 status = -ERESTARTSYS;
1518 goto out_dequeue;
1519 }
Trond Myklebustedc81dc2018-08-22 17:55:46 -04001520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
Trond Myklebustdcbbeda2018-09-01 14:29:18 -04001522 /*
1523 * Update req->rq_ntrans before transmitting to avoid races with
1524 * xprt_update_rtt(), which needs to know that it is recording a
1525 * reply to the first transmission.
1526 */
1527 req->rq_ntrans++;
1528
Chuck Leverc509f15a2020-05-12 17:13:28 -04001529 trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
Trond Myklebust90d91b02017-12-14 21:24:08 -05001530 connect_cookie = xprt->connect_cookie;
Trond Myklebustadfa7142018-09-03 23:58:59 -04001531 status = xprt->ops->send_request(req);
Trond Myklebustc8485e42009-03-11 14:37:59 -04001532 if (status != 0) {
Trond Myklebustdcbbeda2018-09-01 14:29:18 -04001533 req->rq_ntrans--;
Chuck Lever0c776682019-02-11 11:25:04 -05001534 trace_xprt_transmit(req, status);
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001535 return status;
Chuck Leverfe3aca22005-08-25 16:25:50 -07001536 }
Trond Myklebust7ebbbc62018-08-28 09:00:27 -04001537
Trond Myklebustdcbbeda2018-09-01 14:29:18 -04001538 if (is_retrans)
1539 task->tk_client->cl_stats->rpcretrans++;
1540
Chuck Lever4a068252015-05-11 14:02:25 -04001541 xprt_inject_disconnect(xprt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
Bryan Schumaker468f8612011-04-18 15:57:32 -04001543 task->tk_flags |= RPC_TASK_SENT;
Trond Myklebustb5e92412019-05-02 11:21:08 -04001544 spin_lock(&xprt->transport_lock);
Trond Myklebustc8485e42009-03-11 14:37:59 -04001545
Trond Myklebustc8485e42009-03-11 14:37:59 -04001546 xprt->stat.sends++;
1547 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1548 xprt->stat.bklog_u += xprt->backlog.qlen;
Andy Adamson15a45202012-02-14 16:19:18 -05001549 xprt->stat.sending_u += xprt->sending.qlen;
1550 xprt->stat.pending_u += xprt->pending.qlen;
Trond Myklebustb5e92412019-05-02 11:21:08 -04001551 spin_unlock(&xprt->transport_lock);
Trond Myklebust90d91b02017-12-14 21:24:08 -05001552
1553 req->rq_connect_cookie = connect_cookie;
Trond Myklebust944b0422018-08-09 23:33:21 -04001554out_dequeue:
Chuck Lever0c776682019-02-11 11:25:04 -05001555 trace_xprt_transmit(req, status);
Trond Myklebust944b0422018-08-09 23:33:21 -04001556 xprt_request_dequeue_transmit(task);
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001557 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1558 return status;
1559}
1560
1561/**
1562 * xprt_transmit - send an RPC request on a transport
1563 * @task: controlling RPC task
1564 *
1565 * Attempts to drain the transmit queue. On exit, either the transport
1566 * signalled an error that needs to be handled before transmission can
 1567 * resume, or @task finished transmitting and detected that it already
1568 * received a reply.
1569 */
1570void
1571xprt_transmit(struct rpc_task *task)
1572{
1573 struct rpc_rqst *next, *req = task->tk_rqstp;
1574 struct rpc_xprt *xprt = req->rq_xprt;
Chuck Lever6f9f1722020-07-08 16:09:53 -04001575 int counter, status;
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001576
1577 spin_lock(&xprt->queue_lock);
Chuck Lever6f9f1722020-07-08 16:09:53 -04001578 counter = 0;
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001579 while (!list_empty(&xprt->xmit_queue)) {
Chuck Lever6f9f1722020-07-08 16:09:53 -04001580 if (++counter == 20)
1581 break;
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001582 next = list_first_entry(&xprt->xmit_queue,
1583 struct rpc_rqst, rq_xmit);
1584 xprt_pin_rqst(next);
1585 spin_unlock(&xprt->queue_lock);
1586 status = xprt_request_transmit(next, task);
1587 if (status == -EBADMSG && next != req)
1588 status = 0;
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001589 spin_lock(&xprt->queue_lock);
1590 xprt_unpin_rqst(next);
1591 if (status == 0) {
1592 if (!xprt_request_data_received(task) ||
1593 test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1594 continue;
Trond Myklebustc5445772018-09-03 23:39:27 -04001595 } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
Trond Myklebust89f90fe2018-08-29 17:40:55 -04001596 task->tk_status = status;
1597 break;
1598 }
1599 spin_unlock(&xprt->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600}
1601
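/*
 * The drain loop in xprt_transmit() cannot hold xprt->queue_lock across
 * ->send_request(), so it pins each request, drops the lock for the actual
 * send, then retakes the lock before unpinning and re-examining the queue;
 * the iteration bound keeps one caller from monopolising the queue.  Below
 * is a stripped-down, user-space sketch of that pin/unlock/relock shape;
 * demo_queue, demo_item and demo_drain are hypothetical names.
 */
#include <pthread.h>

struct demo_item {
	int pinned;			/* stands in for the rqst pin count */
	struct demo_item *next;
};

struct demo_queue {
	pthread_mutex_t lock;		/* stands in for xprt->queue_lock */
	struct demo_item *head;
};

static void demo_drain(struct demo_queue *q, int (*send)(struct demo_item *))
{
	int counter = 0;

	pthread_mutex_lock(&q->lock);
	while (q->head) {
		struct demo_item *item = q->head;
		int status;

		if (++counter == 20)		/* bound the work done per caller */
			break;
		item->pinned++;			/* keep item alive across the unlock */
		pthread_mutex_unlock(&q->lock);
		status = send(item);		/* may block; queue lock not held */
		pthread_mutex_lock(&q->lock);
		item->pinned--;			/* unpin under the lock */
		if (status != 0)
			break;			/* error: stop draining */
		if (q->head == item)
			q->head = item->next;	/* sent: drop it from the queue */
	}
	pthread_mutex_unlock(&q->lock);
}
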
Trond Myklebustba60eb22013-04-14 10:49:37 -04001602static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1603{
1604 set_bit(XPRT_CONGESTED, &xprt->state);
1605 rpc_sleep_on(&xprt->backlog, task, NULL);
1606}
1607
1608static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
1609{
1610 if (rpc_wake_up_next(&xprt->backlog) == NULL)
1611 clear_bit(XPRT_CONGESTED, &xprt->state);
1612}
1613
1614static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1615{
1616 bool ret = false;
1617
1618 if (!test_bit(XPRT_CONGESTED, &xprt->state))
1619 goto out;
1620 spin_lock(&xprt->reserve_lock);
1621 if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1622 rpc_sleep_on(&xprt->backlog, task, NULL);
1623 ret = true;
1624 }
1625 spin_unlock(&xprt->reserve_lock);
1626out:
1627 return ret;
1628}
1629
Trond Myklebust92ea0112017-06-20 19:35:39 -04001630static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001631{
1632 struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1633
Chuck Leverff699ea82018-03-05 15:13:13 -05001634 if (xprt->num_reqs >= xprt->max_reqs)
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001635 goto out;
Chuck Leverff699ea82018-03-05 15:13:13 -05001636 ++xprt->num_reqs;
Trond Myklebust92ea0112017-06-20 19:35:39 -04001637 spin_unlock(&xprt->reserve_lock);
1638 req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
1639 spin_lock(&xprt->reserve_lock);
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001640 if (req != NULL)
1641 goto out;
Chuck Leverff699ea82018-03-05 15:13:13 -05001642 --xprt->num_reqs;
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001643 req = ERR_PTR(-ENOMEM);
1644out:
1645 return req;
1646}
1647
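/*
 * xprt_dynamic_alloc_slot() cannot sleep in kzalloc() while holding the
 * reserve lock, so it reserves a slot by bumping num_reqs, drops the lock
 * around the allocation, and rolls the count back if the allocation fails
 * once the lock is retaken.  A user-space sketch of that reserve/unlock/
 * allocate/re-lock pattern follows; demo_pool and demo_dynamic_alloc are
 * hypothetical names.
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_pool {
	pthread_mutex_t lock;		/* stands in for xprt->reserve_lock */
	unsigned int num;		/* slots handed out so far */
	unsigned int max;		/* hard ceiling, like xprt->max_reqs */
};

static void *demo_dynamic_alloc(struct demo_pool *p, size_t size)
{
	void *obj = NULL;

	pthread_mutex_lock(&p->lock);
	if (p->num >= p->max)
		goto out;		/* table full: caller must wait on a backlog */
	p->num++;			/* reserve the slot before unlocking */
	pthread_mutex_unlock(&p->lock);
	obj = calloc(1, size);		/* may block; lock is not held here */
	pthread_mutex_lock(&p->lock);
	if (!obj)
		p->num--;		/* allocation failed: give the slot back */
out:
	pthread_mutex_unlock(&p->lock);
	return obj;
}
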
1648static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1649{
Chuck Leverff699ea82018-03-05 15:13:13 -05001650 if (xprt->num_reqs > xprt->min_reqs) {
1651 --xprt->num_reqs;
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001652 kfree(req);
1653 return true;
1654 }
1655 return false;
1656}
1657
Trond Myklebustf39c1bf2012-09-07 11:08:50 -04001658void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659{
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001660 struct rpc_rqst *req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
Trond Myklebustf39c1bf2012-09-07 11:08:50 -04001662 spin_lock(&xprt->reserve_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 if (!list_empty(&xprt->free)) {
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001664 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1665 list_del(&req->rq_list);
1666 goto out_init_req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 }
Trond Myklebust92ea0112017-06-20 19:35:39 -04001668 req = xprt_dynamic_alloc_slot(xprt);
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001669 if (!IS_ERR(req))
1670 goto out_init_req;
1671 switch (PTR_ERR(req)) {
1672 case -ENOMEM:
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001673 dprintk("RPC: dynamic allocation of request slot "
1674 "failed! Retrying\n");
Trond Myklebust1afeaf52012-05-19 12:12:53 -04001675 task->tk_status = -ENOMEM;
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001676 break;
1677 case -EAGAIN:
Trond Myklebustba60eb22013-04-14 10:49:37 -04001678 xprt_add_backlog(xprt, task);
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001679 dprintk("RPC: waiting for request slot\n");
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001680 fallthrough;
Trond Myklebust1afeaf52012-05-19 12:12:53 -04001681 default:
1682 task->tk_status = -EAGAIN;
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001683 }
Trond Myklebustf39c1bf2012-09-07 11:08:50 -04001684 spin_unlock(&xprt->reserve_lock);
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001685 return;
1686out_init_req:
Chuck Leverff699ea82018-03-05 15:13:13 -05001687 xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1688 xprt->num_reqs);
Chuck Lever37ac86c2018-05-04 15:34:53 -04001689 spin_unlock(&xprt->reserve_lock);
1690
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001691 task->tk_status = 0;
1692 task->tk_rqstp = req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693}
Trond Myklebustf39c1bf2012-09-07 11:08:50 -04001694EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1695
Chuck Levera9cde232018-05-04 15:34:59 -04001696void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001697{
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001698 spin_lock(&xprt->reserve_lock);
Trond Myklebustc25573b2011-12-01 14:16:17 -05001699 if (!xprt_dynamic_free_slot(xprt, req)) {
1700 memset(req, 0, sizeof(*req)); /* mark unused */
1701 list_add(&req->rq_list, &xprt->free);
1702 }
Trond Myklebustba60eb22013-04-14 10:49:37 -04001703 xprt_wake_up_backlog(xprt);
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001704 spin_unlock(&xprt->reserve_lock);
1705}
Chuck Levera9cde232018-05-04 15:34:59 -04001706EXPORT_SYMBOL_GPL(xprt_free_slot);
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001707
Trond Myklebust21de0a92011-07-17 16:57:32 -04001708static void xprt_free_all_slots(struct rpc_xprt *xprt)
1709{
1710 struct rpc_rqst *req;
1711 while (!list_empty(&xprt->free)) {
1712 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1713 list_del(&req->rq_list);
1714 kfree(req);
1715 }
1716}
1717
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001718struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1719 unsigned int num_prealloc,
1720 unsigned int max_alloc)
Pavel Emelyanovbd1722d2010-09-29 16:02:43 +04001721{
1722 struct rpc_xprt *xprt;
Trond Myklebust21de0a92011-07-17 16:57:32 -04001723 struct rpc_rqst *req;
1724 int i;
Pavel Emelyanovbd1722d2010-09-29 16:02:43 +04001725
1726 xprt = kzalloc(size, GFP_KERNEL);
1727 if (xprt == NULL)
1728 goto out;
1729
Trond Myklebust21de0a92011-07-17 16:57:32 -04001730 xprt_init(xprt, net);
1731
1732 for (i = 0; i < num_prealloc; i++) {
1733 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1734 if (!req)
wangweidong83131642013-10-15 11:44:30 +08001735 goto out_free;
Trond Myklebust21de0a92011-07-17 16:57:32 -04001736 list_add(&req->rq_list, &xprt->free);
1737 }
Trond Myklebustd9ba1312011-07-17 18:11:30 -04001738 if (max_alloc > num_prealloc)
1739 xprt->max_reqs = max_alloc;
1740 else
1741 xprt->max_reqs = num_prealloc;
1742 xprt->min_reqs = num_prealloc;
Chuck Leverff699ea82018-03-05 15:13:13 -05001743 xprt->num_reqs = num_prealloc;
Pavel Emelyanovbd1722d2010-09-29 16:02:43 +04001744
1745 return xprt;
1746
1747out_free:
Trond Myklebust21de0a92011-07-17 16:57:32 -04001748 xprt_free(xprt);
Pavel Emelyanovbd1722d2010-09-29 16:02:43 +04001749out:
1750 return NULL;
1751}
1752EXPORT_SYMBOL_GPL(xprt_alloc);
1753
Pavel Emelyanove204e622010-09-29 16:03:13 +04001754void xprt_free(struct rpc_xprt *xprt)
1755{
Pavel Emelyanov37aa2132010-09-29 16:05:43 +04001756 put_net(xprt->xprt_net);
Trond Myklebust21de0a92011-07-17 16:57:32 -04001757 xprt_free_all_slots(xprt);
Trond Myklebustfda1bfe2015-02-14 17:48:49 -05001758 kfree_rcu(xprt, rcu);
Pavel Emelyanove204e622010-09-29 16:03:13 +04001759}
1760EXPORT_SYMBOL_GPL(xprt_free);
1761
Trond Myklebust902c5882018-09-01 17:21:01 -04001762static void
1763xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1764{
1765 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1766}
1767
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001768static __be32
1769xprt_alloc_xid(struct rpc_xprt *xprt)
1770{
1771 __be32 xid;
1772
1773 spin_lock(&xprt->reserve_lock);
1774 xid = (__force __be32)xprt->xid++;
1775 spin_unlock(&xprt->reserve_lock);
1776 return xid;
1777}
1778
1779static void
1780xprt_init_xid(struct rpc_xprt *xprt)
1781{
1782 xprt->xid = prandom_u32();
1783}
1784
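/*
 * XIDs start at a random value per transport and then simply increment under
 * the reserve lock, so concurrent reserves never hand out duplicates and a
 * restarted client is unlikely to reuse XIDs from its previous incarnation.
 * The sketch below mirrors that scheme in user-space C; demo_xid_gen and the
 * use of rand() as the seed source are hypothetical simplifications.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct demo_xid_gen {
	pthread_mutex_t lock;		/* stands in for xprt->reserve_lock */
	uint32_t next;
};

static void demo_xid_init(struct demo_xid_gen *g)
{
	pthread_mutex_init(&g->lock, NULL);
	g->next = (uint32_t)rand();	/* random start, like prandom_u32() */
}

static uint32_t demo_xid_alloc(struct demo_xid_gen *g)
{
	uint32_t xid;

	pthread_mutex_lock(&g->lock);
	xid = g->next++;		/* monotonically increasing under the lock */
	pthread_mutex_unlock(&g->lock);
	return xid;
}
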
1785static void
1786xprt_request_init(struct rpc_task *task)
1787{
1788 struct rpc_xprt *xprt = task->tk_xprt;
1789 struct rpc_rqst *req = task->tk_rqstp;
1790
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001791 req->rq_task = task;
1792 req->rq_xprt = xprt;
1793 req->rq_buffer = NULL;
1794 req->rq_xid = xprt_alloc_xid(xprt);
Trond Myklebust902c5882018-09-01 17:21:01 -04001795 xprt_init_connect_cookie(req, xprt);
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001796 req->rq_snd_buf.len = 0;
1797 req->rq_snd_buf.buflen = 0;
1798 req->rq_rcv_buf.len = 0;
1799 req->rq_rcv_buf.buflen = 0;
Trond Myklebust71700bb2018-11-30 16:11:15 -05001800 req->rq_snd_buf.bvec = NULL;
1801 req->rq_rcv_buf.bvec = NULL;
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001802 req->rq_release_snd_buf = NULL;
Trond Myklebustda953062019-04-07 13:58:56 -04001803 xprt_init_majortimeo(task, req);
Chuck Lever09d2ba02020-07-08 16:09:21 -04001804
1805 trace_xprt_reserve(req);
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001806}
1807
1808static void
1809xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1810{
1811 xprt->ops->alloc_slot(xprt, task);
1812 if (task->tk_rqstp != NULL)
1813 xprt_request_init(task);
1814}
1815
Chuck Lever9903cd12005-08-11 16:25:26 -04001816/**
1817 * xprt_reserve - allocate an RPC request slot
1818 * @task: RPC task requesting a slot allocation
1819 *
Trond Myklebustba60eb22013-04-14 10:49:37 -04001820 * If the transport is marked as being congested, or if no more
1821 * slots are available, place the task on the transport's
Chuck Lever9903cd12005-08-11 16:25:26 -04001822 * backlog queue.
1823 */
1824void xprt_reserve(struct rpc_task *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825{
Trond Myklebustfb43d172016-01-30 16:39:26 -05001826 struct rpc_xprt *xprt = task->tk_xprt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
Trond Myklebust43cedbf0e2011-07-17 16:01:03 -04001828 task->tk_status = 0;
1829 if (task->tk_rqstp != NULL)
1830 return;
1831
Trond Myklebust43cedbf0e2011-07-17 16:01:03 -04001832 task->tk_status = -EAGAIN;
Trond Myklebustba60eb22013-04-14 10:49:37 -04001833 if (!xprt_throttle_congested(xprt, task))
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001834 xprt_do_reserve(xprt, task);
Trond Myklebustba60eb22013-04-14 10:49:37 -04001835}
1836
1837/**
1838 * xprt_retry_reserve - allocate an RPC request slot
1839 * @task: RPC task requesting a slot allocation
1840 *
1841 * If no more slots are available, place the task on the transport's
1842 * backlog queue.
1843 * Note that the only difference with xprt_reserve is that we now
1844 * ignore the value of the XPRT_CONGESTED flag.
1845 */
1846void xprt_retry_reserve(struct rpc_task *task)
1847{
Trond Myklebustfb43d172016-01-30 16:39:26 -05001848 struct rpc_xprt *xprt = task->tk_xprt;
Trond Myklebustba60eb22013-04-14 10:49:37 -04001849
1850 task->tk_status = 0;
1851 if (task->tk_rqstp != NULL)
1852 return;
1853
Trond Myklebustba60eb22013-04-14 10:49:37 -04001854 task->tk_status = -EAGAIN;
Trond Myklebust9dc6edc2018-08-22 14:24:16 -04001855 xprt_do_reserve(xprt, task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856}
1857
Chuck Lever9903cd12005-08-11 16:25:26 -04001858/**
1859 * xprt_release - release an RPC request slot
1860 * @task: task which is finished with the slot
1861 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 */
Chuck Lever9903cd12005-08-11 16:25:26 -04001863void xprt_release(struct rpc_task *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864{
Ricardo Labiaga55ae1aa2009-04-01 09:23:03 -04001865 struct rpc_xprt *xprt;
Trond Myklebust87ed5002013-01-07 14:30:46 -05001866 struct rpc_rqst *req = task->tk_rqstp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
Trond Myklebust87ed5002013-01-07 14:30:46 -05001868 if (req == NULL) {
1869 if (task->tk_client) {
Trond Myklebustfb43d172016-01-30 16:39:26 -05001870 xprt = task->tk_xprt;
Trond Myklebustbd79bc52018-09-07 19:38:55 -04001871 xprt_release_write(xprt, task);
Trond Myklebust87ed5002013-01-07 14:30:46 -05001872 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 return;
Trond Myklebust87ed5002013-01-07 14:30:46 -05001874 }
Ricardo Labiaga55ae1aa2009-04-01 09:23:03 -04001875
Ricardo Labiaga55ae1aa2009-04-01 09:23:03 -04001876 xprt = req->rq_xprt;
Trond Myklebustcc204d02019-09-10 13:01:35 -04001877 xprt_request_dequeue_xprt(task);
Trond Myklebustb5e92412019-05-02 11:21:08 -04001878 spin_lock(&xprt->transport_lock);
Chuck Lever49e9a892005-08-25 16:25:51 -07001879 xprt->ops->release_xprt(xprt, task);
Chuck Levera58dd392005-08-25 16:25:53 -07001880 if (xprt->ops->release_request)
1881 xprt->ops->release_request(task);
Trond Myklebustad3331a2016-08-02 13:47:43 -04001882 xprt_schedule_autodisconnect(xprt);
Trond Myklebustb5e92412019-05-02 11:21:08 -04001883 spin_unlock(&xprt->transport_lock);
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001884 if (req->rq_buffer)
Chuck Lever3435c742016-09-15 10:55:29 -04001885 xprt->ops->buf_free(task);
Chuck Lever4a068252015-05-11 14:02:25 -04001886 xprt_inject_disconnect(xprt);
Trond Myklebust9d96acb2018-09-13 12:22:04 -04001887 xdr_free_bvec(&req->rq_rcv_buf);
Trond Myklebust0472e472019-02-19 13:00:13 -05001888 xdr_free_bvec(&req->rq_snd_buf);
Trond Myklebusta17c2152010-07-31 14:29:08 -04001889 if (req->rq_cred != NULL)
1890 put_rpccred(req->rq_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 task->tk_rqstp = NULL;
J. Bruce Fieldsead5e1c2005-10-13 16:54:43 -04001892 if (req->rq_release_snd_buf)
1893 req->rq_release_snd_buf(req);
Ricardo Labiaga55ae1aa2009-04-01 09:23:03 -04001894
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001895 if (likely(!bc_prealloc(req)))
Chuck Levera9cde232018-05-04 15:34:59 -04001896 xprt->ops->free_slot(xprt, req);
Trond Myklebustee5ebe82010-04-16 16:37:01 -04001897 else
Trond Myklebustc9acb422010-03-19 15:36:22 -04001898 xprt_free_bc_request(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899}
1900
Trond Myklebust902c5882018-09-01 17:21:01 -04001901#ifdef CONFIG_SUNRPC_BACKCHANNEL
1902void
1903xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1904{
1905 struct xdr_buf *xbufp = &req->rq_snd_buf;
1906
1907 task->tk_rqstp = req;
1908 req->rq_task = task;
1909 xprt_init_connect_cookie(req, req->rq_xprt);
1910 /*
1911 * Set up the xdr_buf length.
1912 * This also indicates that the buffer is XDR encoded already.
1913 */
1914 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1915 xbufp->tail[0].iov_len;
Trond Myklebust902c5882018-09-01 17:21:01 -04001916}
1917#endif
1918
Trond Myklebust21de0a92011-07-17 16:57:32 -04001919static void xprt_init(struct rpc_xprt *xprt, struct net *net)
Chuck Leverc2866762006-08-22 20:06:20 -04001920{
Trond Myklebust30c51162015-02-24 20:31:39 -05001921 kref_init(&xprt->kref);
Chuck Leverc2866762006-08-22 20:06:20 -04001922
1923 spin_lock_init(&xprt->transport_lock);
1924 spin_lock_init(&xprt->reserve_lock);
Trond Myklebust75c84152018-08-31 10:21:00 -04001925 spin_lock_init(&xprt->queue_lock);
Chuck Leverc2866762006-08-22 20:06:20 -04001926
1927 INIT_LIST_HEAD(&xprt->free);
Trond Myklebust95f76912018-09-07 08:35:22 -04001928 xprt->recv_queue = RB_ROOT;
Trond Myklebust944b0422018-08-09 23:33:21 -04001929 INIT_LIST_HEAD(&xprt->xmit_queue);
Trond Myklebust9e00abc2011-07-13 19:20:49 -04001930#if defined(CONFIG_SUNRPC_BACKCHANNEL)
Ricardo Labiagaf9acac12009-04-01 09:22:59 -04001931 spin_lock_init(&xprt->bc_pa_lock);
1932 INIT_LIST_HEAD(&xprt->bc_pa_list);
Trond Myklebust9e00abc2011-07-13 19:20:49 -04001933#endif /* CONFIG_SUNRPC_BACKCHANNEL */
Trond Myklebust80b14d52015-02-14 20:31:59 -05001934 INIT_LIST_HEAD(&xprt->xprt_switch);
Ricardo Labiagaf9acac12009-04-01 09:22:59 -04001935
Chuck Leverc2866762006-08-22 20:06:20 -04001936 xprt->last_used = jiffies;
1937 xprt->cwnd = RPC_INITCWND;
Chuck Levera5090502007-03-29 16:48:04 -04001938 xprt->bind_index = 0;
Chuck Leverc2866762006-08-22 20:06:20 -04001939
1940 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1941 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
Trond Myklebust79c99152018-09-09 13:53:05 -04001942 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
Chuck Leverc2866762006-08-22 20:06:20 -04001943 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1944
Chuck Leverc2866762006-08-22 20:06:20 -04001945 xprt_init_xid(xprt);
1946
Trond Myklebust21de0a92011-07-17 16:57:32 -04001947 xprt->xprt_net = get_net(net);
Trond Myklebust8d9266f2011-07-17 16:01:09 -04001948}
1949
1950/**
1951 * xprt_create_transport - create an RPC transport
1952 * @args: rpc transport creation arguments
1953 *
1954 */
1955struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1956{
1957 struct rpc_xprt *xprt;
Trond Myklebust9bccd262020-11-10 12:58:22 -05001958 const struct xprt_class *t;
Trond Myklebust8d9266f2011-07-17 16:01:09 -04001959
Trond Myklebust9bccd262020-11-10 12:58:22 -05001960 t = xprt_class_find_by_ident(args->ident);
1961 if (!t) {
1962 dprintk("RPC: transport (%d) not supported\n", args->ident);
1963 return ERR_PTR(-EIO);
Trond Myklebust8d9266f2011-07-17 16:01:09 -04001964 }
Trond Myklebust8d9266f2011-07-17 16:01:09 -04001965
Trond Myklebust8d9266f2011-07-17 16:01:09 -04001966 xprt = t->setup(args);
Trond Myklebust9bccd262020-11-10 12:58:22 -05001967 xprt_class_release(t);
1968
Chuck Lever911813d2020-05-12 17:13:34 -04001969 if (IS_ERR(xprt))
Trond Myklebust21de0a92011-07-17 16:57:32 -04001970 goto out;
J. Bruce Fields33d90ac2013-04-11 15:06:36 -04001971 if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1972 xprt->idle_timeout = 0;
Trond Myklebust21de0a92011-07-17 16:57:32 -04001973 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1974 if (xprt_has_timer(xprt))
Anna Schumaker502980e2019-06-18 14:57:33 -04001975 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
Trond Myklebust21de0a92011-07-17 16:57:32 -04001976 else
Kees Cookff861c42017-10-16 17:29:42 -07001977 timer_setup(&xprt->timer, NULL, 0);
Trond Myklebust4e0038b2012-03-01 17:01:05 -05001978
1979 if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1980 xprt_destroy(xprt);
1981 return ERR_PTR(-EINVAL);
1982 }
1983 xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1984 if (xprt->servername == NULL) {
1985 xprt_destroy(xprt);
1986 return ERR_PTR(-ENOMEM);
1987 }
1988
Jeff Layton3f940092015-03-31 12:03:28 -04001989 rpc_xprt_debugfs_register(xprt);
Jeff Layton388f0c72014-11-26 14:44:44 -05001990
Chuck Lever911813d2020-05-12 17:13:34 -04001991 trace_xprt_create(xprt);
Trond Myklebust21de0a92011-07-17 16:57:32 -04001992out:
Chuck Leverc2866762006-08-22 20:06:20 -04001993 return xprt;
1994}
1995
Trond Myklebust528fd352017-10-19 12:13:10 -04001996static void xprt_destroy_cb(struct work_struct *work)
1997{
1998 struct rpc_xprt *xprt =
1999 container_of(work, struct rpc_xprt, task_cleanup);
2000
Chuck Lever911813d2020-05-12 17:13:34 -04002001 trace_xprt_destroy(xprt);
2002
Trond Myklebust528fd352017-10-19 12:13:10 -04002003 rpc_xprt_debugfs_unregister(xprt);
2004 rpc_destroy_wait_queue(&xprt->binding);
2005 rpc_destroy_wait_queue(&xprt->pending);
2006 rpc_destroy_wait_queue(&xprt->sending);
2007 rpc_destroy_wait_queue(&xprt->backlog);
2008 kfree(xprt->servername);
2009 /*
Trond Myklebust669996a2019-10-17 09:02:21 -04002010 * Destroy any existing back channel
2011 */
2012 xprt_destroy_backchannel(xprt, UINT_MAX);
2013
2014 /*
Trond Myklebust528fd352017-10-19 12:13:10 -04002015 * Tear down transport state and free the rpc_xprt
2016 */
2017 xprt->ops->destroy(xprt);
2018}
2019
Chuck Lever9903cd12005-08-11 16:25:26 -04002020/**
2021 * xprt_destroy - destroy an RPC transport, killing off all requests.
Trond Myklebusta8de2402011-03-15 19:56:30 -04002022 * @xprt: transport to destroy
Chuck Lever9903cd12005-08-11 16:25:26 -04002023 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 */
Trond Myklebusta8de2402011-03-15 19:56:30 -04002025static void xprt_destroy(struct rpc_xprt *xprt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
Trond Myklebust528fd352017-10-19 12:13:10 -04002027 /*
2028 * Exclude transport connect/disconnect handlers and autoclose
2029 */
Trond Myklebust79234c32015-09-18 15:53:24 -04002030 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2031
Trond Myklebust0065db32006-01-03 09:55:56 +01002032 del_timer_sync(&xprt->timer);
Chuck Leverc8541ec2006-10-17 14:44:27 -04002033
2034 /*
Trond Myklebust528fd352017-10-19 12:13:10 -04002035 * Destroy sockets etc from the system workqueue so they can
2036 * safely flush receive work running on rpciod.
Chuck Leverc8541ec2006-10-17 14:44:27 -04002037 */
Trond Myklebust528fd352017-10-19 12:13:10 -04002038 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2039 schedule_work(&xprt->task_cleanup);
Trond Myklebust6b6ca862006-09-05 12:55:57 -04002040}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
Trond Myklebust30c51162015-02-24 20:31:39 -05002042static void xprt_destroy_kref(struct kref *kref)
2043{
2044 xprt_destroy(container_of(kref, struct rpc_xprt, kref));
2045}
2046
2047/**
2048 * xprt_get - return a reference to an RPC transport.
2049 * @xprt: pointer to the transport
2050 *
2051 */
2052struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2053{
2054 if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2055 return xprt;
2056 return NULL;
2057}
2058EXPORT_SYMBOL_GPL(xprt_get);
2059
Trond Myklebust6b6ca862006-09-05 12:55:57 -04002060/**
2061 * xprt_put - release a reference to an RPC transport.
2062 * @xprt: pointer to the transport
2063 *
2064 */
2065void xprt_put(struct rpc_xprt *xprt)
2066{
Trond Myklebust30c51162015-02-24 20:31:39 -05002067 if (xprt != NULL)
2068 kref_put(&xprt->kref, xprt_destroy_kref);
Trond Myklebust6b6ca862006-09-05 12:55:57 -04002069}
Chuck Lever5d252f92016-01-07 14:50:10 -05002070EXPORT_SYMBOL_GPL(xprt_put);
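
/*
 * Transport lifetime is a straight reference count: xprt_get() only takes a
 * reference while the count is still non-zero, and the final xprt_put()
 * triggers xprt_destroy_kref().  The user-space sketch below reproduces the
 * kref_get_unless_zero()/kref_put() pairing with C11 atomics; demo_obj,
 * demo_get_unless_zero and demo_put are hypothetical names.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_obj {
	atomic_int refs;
	void (*release)(struct demo_obj *obj);
};

static bool demo_get_unless_zero(struct demo_obj *obj)
{
	int old = atomic_load(&obj->refs);

	do {
		if (old == 0)			/* already on its way out: refuse */
			return false;
	} while (!atomic_compare_exchange_weak(&obj->refs, &old, old + 1));
	return true;
}

static void demo_put(struct demo_obj *obj)
{
	if (atomic_fetch_sub(&obj->refs, 1) == 1)
		obj->release(obj);		/* dropped the last reference */
}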