// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);
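
/*
 * The per-connection limit can be tuned at run time. As an illustrative
 * sketch (assuming the usual sysfs exposure of module parameters, with
 * this code built as part of the sunrpc module):
 *
 *      echo 16 > /sys/module/sunrpc/parameters/svc_rpc_per_connection_limit
 *
 * A value of 0 (the default) disables the limit; see
 * svc_xprt_slots_in_range() below.
 */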

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(struct timer_list *t);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://nfsv4bat.org/Documents/ConnectAThon/1996/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *      svc_pool->sp_lock protects most of the fields of that pool.
 *      svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *      when both need to be taken (rare), svc_serv->sv_lock is first.
 *      The "service mutex" protects svc_serv->sv_nrthreads.
 *      svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *      and the ->sk_info_authunix cache.
 *
 *      The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *      enqueued multiply. During normal transport processing this bit
 *      is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *      Providers should not manipulate this bit directly.
 *
 *      Some flags can be set to certain values at any time
 *      providing that certain rules are followed:
 *
 *      XPT_CONN, XPT_DATA:
 *              - Can be set or cleared at any time.
 *              - After a set, svc_xprt_enqueue must be called to enqueue
 *                the transport for processing.
 *              - After a clear, the transport must be read/accepted.
 *                If this succeeds, it must be set again.
 *      XPT_CLOSE:
 *              - Can be set at any time. It is never cleared.
 *      XPT_DEAD:
 *              - Can only be set while XPT_BUSY is held, which ensures
 *                that no other thread will be using the transport or will
 *                try to set XPT_DEAD.
 */
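
/*
 * Illustrative sketch of the rules above (hypothetical provider code,
 * not taken from any in-tree transport): a provider that notices newly
 * arrived data marks the transport and enqueues it:
 *
 *      set_bit(XPT_DATA, &xprt->xpt_flags);
 *      svc_xprt_enqueue(xprt);
 *
 * A server thread later clears XPT_DATA only when xpo_recvfrom() finds
 * no more data to read. XPT_BUSY itself is managed exclusively by
 * svc_xprt_enqueue()/svc_xprt_received(), never by the provider.
 */
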
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
        struct svc_xprt_class *cl;
        int res = -EEXIST;

        dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

        INIT_LIST_HEAD(&xcl->xcl_list);
        spin_lock(&svc_xprt_class_lock);
        /* Make sure there isn't already a class with the same name */
        list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
                if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
                        goto out;
        }
        list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
        res = 0;
out:
        spin_unlock(&svc_xprt_class_lock);
        return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
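
/*
 * A transport implementation typically registers its class from its
 * module init hook. A minimal sketch, using a hypothetical "foo"
 * transport (field values are illustrative only):
 *
 *      static struct svc_xprt_class svc_foo_class = {
 *              .xcl_name        = "foo",
 *              .xcl_owner       = THIS_MODULE,
 *              .xcl_ops         = &svc_foo_ops,
 *              .xcl_max_payload = RPCSVC_MAXPAYLOAD,
 *      };
 *
 *      svc_reg_xprt_class(&svc_foo_class);
 *
 * The matching svc_unreg_xprt_class() call belongs in the module's
 * exit hook.
 */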

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
        dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
        spin_lock(&svc_xprt_class_lock);
        list_del_init(&xcl->xcl_list);
        spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/**
 * svc_print_xprts - Format the transport list for printing
 * @buf: target buffer for formatted address
 * @maxlen: length of target buffer
 *
 * Fills in @buf with a string containing a list of transport names, each name
 * terminated with '\n'. If the buffer is too small, some entries may be
 * missing, but it is guaranteed that all lines in the output buffer are
 * complete.
 *
 * Returns positive length of the filled-in string.
 */
int svc_print_xprts(char *buf, int maxlen)
{
        struct svc_xprt_class *xcl;
        char tmpstr[80];
        int len = 0;
        buf[0] = '\0';

        spin_lock(&svc_xprt_class_lock);
        list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
                int slen;

                slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
                                xcl->xcl_name, xcl->xcl_max_payload);
                if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
                        break;
                len += slen;
                strcat(buf, tmpstr);
        }
        spin_unlock(&svc_xprt_class_lock);

        return len;
}

static void svc_xprt_free(struct kref *kref)
{
        struct svc_xprt *xprt =
                container_of(kref, struct svc_xprt, xpt_ref);
        struct module *owner = xprt->xpt_class->xcl_owner;
        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
                svcauth_unix_info_release(xprt);
        put_cred(xprt->xpt_cred);
        put_net(xprt->xpt_net);
        /* See comment on corresponding get in xs_setup_bc_tcp(): */
        if (xprt->xpt_bc_xprt)
                xprt_put(xprt->xpt_bc_xprt);
        if (xprt->xpt_bc_xps)
                xprt_switch_put(xprt->xpt_bc_xps);
        xprt->xpt_ops->xpo_free(xprt);
        module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
        kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
                   struct svc_xprt *xprt, struct svc_serv *serv)
{
        memset(xprt, 0, sizeof(*xprt));
        xprt->xpt_class = xcl;
        xprt->xpt_ops = xcl->xcl_ops;
        kref_init(&xprt->xpt_ref);
        xprt->xpt_server = serv;
        INIT_LIST_HEAD(&xprt->xpt_list);
        INIT_LIST_HEAD(&xprt->xpt_ready);
        INIT_LIST_HEAD(&xprt->xpt_deferred);
        INIT_LIST_HEAD(&xprt->xpt_users);
        mutex_init(&xprt->xpt_mutex);
        spin_lock_init(&xprt->xpt_lock);
        set_bit(XPT_BUSY, &xprt->xpt_flags);
        xprt->xpt_net = get_net(net);
        strcpy(xprt->xpt_remotebuf, "uninitialized");
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
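
/*
 * Sketch of how a transport's xpo_create method might use the helper
 * above (hypothetical code, loosely modelled on the socket transport;
 * "foo" names continue the example from svc_reg_xprt_class() above):
 *
 *      struct foo_xprt *fx = kzalloc(sizeof(*fx), GFP_KERNEL);
 *
 *      if (!fx)
 *              return ERR_PTR(-ENOMEM);
 *      svc_xprt_init(net, &svc_foo_class, &fx->fx_xprt, serv);
 *
 * Note that svc_xprt_init() leaves XPT_BUSY set: the creator must hand
 * the new transport to svc_xprt_received() (for example via
 * svc_add_new_perm_xprt()) before it can ever be enqueued.
 */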

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                                         struct svc_serv *serv,
                                         struct net *net,
                                         const int family,
                                         const unsigned short port,
                                         int flags)
{
        struct sockaddr_in sin = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
                .sin_port = htons(port),
        };
#if IS_ENABLED(CONFIG_IPV6)
        struct sockaddr_in6 sin6 = {
                .sin6_family = AF_INET6,
                .sin6_addr = IN6ADDR_ANY_INIT,
                .sin6_port = htons(port),
        };
#endif
        struct sockaddr *sap;
        size_t len;

        switch (family) {
        case PF_INET:
                sap = (struct sockaddr *)&sin;
                len = sizeof(sin);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                sap = (struct sockaddr *)&sin6;
                len = sizeof(sin6);
                break;
#endif
        default:
                return ERR_PTR(-EAFNOSUPPORT);
        }

        return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
        if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
                WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
                return;
        }

        /* As soon as we clear busy, the xprt could be closed and
         * 'put', so we need a reference to call svc_enqueue_xprt with:
         */
        svc_xprt_get(xprt);
        smp_mb__before_atomic();
        clear_bit(XPT_BUSY, &xprt->xpt_flags);
        xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
        svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
        clear_bit(XPT_TEMP, &new->xpt_flags);
        spin_lock_bh(&serv->sv_lock);
        list_add(&new->xpt_list, &serv->sv_permsocks);
        spin_unlock_bh(&serv->sv_lock);
        svc_xprt_received(new);
}

static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
                            struct net *net, const int family,
                            const unsigned short port, int flags,
                            const struct cred *cred)
{
        struct svc_xprt_class *xcl;

        spin_lock(&svc_xprt_class_lock);
        list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
                struct svc_xprt *newxprt;
                unsigned short newport;

                if (strcmp(xprt_name, xcl->xcl_name))
                        continue;

                if (!try_module_get(xcl->xcl_owner))
                        goto err;

                spin_unlock(&svc_xprt_class_lock);
                newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
                if (IS_ERR(newxprt)) {
                        module_put(xcl->xcl_owner);
                        return PTR_ERR(newxprt);
                }
                newxprt->xpt_cred = get_cred(cred);
                svc_add_new_perm_xprt(serv, newxprt);
                newport = svc_xprt_local_port(newxprt);
                return newport;
        }
 err:
        spin_unlock(&svc_xprt_class_lock);
        /* This errno is exposed to user space. Provide a reasonable
         * perror msg for a bad transport. */
        return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
                    struct net *net, const int family,
                    const unsigned short port, int flags,
                    const struct cred *cred)
{
        int err;

        dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
        err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
        if (err == -EPROTONOSUPPORT) {
                request_module("svc%s", xprt_name);
                err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
        }
        if (err < 0)
                dprintk("svc: transport %s not found, err %d\n",
                        xprt_name, -err);
        return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
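
/*
 * Typical callers are RPC services setting up their listeners. A sketch
 * (assuming SVC_SOCK_DEFAULTS from svcsock.h and an already-created
 * serv; the port number is illustrative):
 *
 *      err = svc_create_xprt(serv, "tcp", net, PF_INET, 2049,
 *                            SVC_SOCK_DEFAULTS, cred);
 *
 * A negative return is an errno; a positive return is the bound local
 * port, which matters when port 0 was passed to request an ephemeral
 * port.
 */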

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
        memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
        rqstp->rq_addrlen = xprt->xpt_remotelen;

        /*
         * Destination address in request is needed for binding the
         * source address in RPC replies/callbacks later.
         */
        memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
        rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
        return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
        unsigned int limit = svc_rpc_per_connection_limit;
        int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

        return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
        if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
                if (!svc_xprt_slots_in_range(xprt))
                        return false;
                atomic_inc(&xprt->xpt_nr_rqsts);
                set_bit(RQ_DATA, &rqstp->rq_flags);
        }
        return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
                atomic_dec(&xprt->xpt_nr_rqsts);
                smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
                svc_xprt_enqueue(xprt);
        }
}

static bool svc_xprt_ready(struct svc_xprt *xprt)
{
        unsigned long xpt_flags;

        /*
         * If another cpu has recently updated xpt_flags,
         * sk_sock->flags, xpt_reserved, or xpt_nr_rqsts, we need to
         * know about it; otherwise it's possible that both that cpu and
         * this one could call svc_xprt_enqueue() without either
         * svc_xprt_enqueue() recognizing that the conditions below
         * are satisfied, and we could stall indefinitely:
         */
        smp_rmb();
        xpt_flags = READ_ONCE(xprt->xpt_flags);

        if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
                return true;
        if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
                if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
                    svc_xprt_slots_in_range(xprt))
                        return true;
                trace_svc_xprt_no_write_space(xprt);
                return false;
        }
        return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
        struct svc_pool *pool;
        struct svc_rqst *rqstp = NULL;
        int cpu;

        if (!svc_xprt_ready(xprt))
                return;

        /* Mark transport as busy. It will remain in this state until
         * the provider calls svc_xprt_received. We update XPT_BUSY
         * atomically because it also guards against trying to enqueue
         * the transport twice.
         */
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                return;

        cpu = get_cpu();
        pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

        atomic_long_inc(&pool->sp_stats.packets);

        spin_lock_bh(&pool->sp_lock);
        list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
        pool->sp_stats.sockets_queued++;
        spin_unlock_bh(&pool->sp_lock);

        /* find a thread for this xprt */
        rcu_read_lock();
        list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
                if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                        continue;
                atomic_long_inc(&pool->sp_stats.threads_woken);
                rqstp->rq_qtime = ktime_get();
                wake_up_process(rqstp->rq_task);
                goto out_unlock;
        }
        set_bit(SP_CONGESTED, &pool->sp_flags);
        rqstp = NULL;
out_unlock:
        rcu_read_unlock();
        put_cpu();
        trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
        if (test_bit(XPT_BUSY, &xprt->xpt_flags))
                return;
        xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
        struct svc_xprt *xprt = NULL;

        if (list_empty(&pool->sp_sockets))
                goto out;

        spin_lock_bh(&pool->sp_lock);
        if (likely(!list_empty(&pool->sp_sockets))) {
                xprt = list_first_entry(&pool->sp_sockets,
                                        struct svc_xprt, xpt_ready);
                list_del_init(&xprt->xpt_ready);
                svc_xprt_get(xprt);
        }
        spin_unlock_bh(&pool->sp_lock);
out:
        return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp: The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        space += rqstp->rq_res.head[0].iov_len;

        if (xprt && space < rqstp->rq_reserved) {
                atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
                rqstp->rq_reserved = space;
                smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
                svc_xprt_enqueue(xprt);
        }
}
EXPORT_SYMBOL_GPL(svc_reserve);
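
/*
 * As an illustrative sketch: a service that knows its reply will fit in
 * one page can return most of its reservation early with
 *
 *      svc_reserve(rqstp, PAGE_SIZE);
 *
 * which shrinks xpt_reserved and may re-enqueue the transport if the
 * freed write space now satisfies svc_xprt_ready().
 */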

static void svc_xprt_release(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        xprt->xpt_ops->xpo_release_rqst(rqstp);

        kfree(rqstp->rq_deferred);
        rqstp->rq_deferred = NULL;

        svc_free_res_pages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;

        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        svc_xprt_release_slot(rqstp);
        rqstp->rq_xprt = NULL;
        svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;
        struct svc_pool *pool;

        pool = &serv->sv_pools[0];

        rcu_read_lock();
        list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
                /* skip any that aren't queued */
                if (test_bit(RQ_BUSY, &rqstp->rq_flags))
                        continue;
                rcu_read_unlock();
                wake_up_process(rqstp->rq_task);
                trace_svc_wake_up(rqstp->rq_task->pid);
                return;
        }
        rcu_read_unlock();

        /* No free entries available */
        set_bit(SP_TASK_PENDING, &pool->sp_flags);
        smp_wmb();
        trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);
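
/*
 * Illustrative sketch (not a specific in-tree call site): work that
 * completes outside the normal transport path can nudge the service
 * with
 *
 *      svc_wake_up(serv);
 *
 * so that an idle thread re-runs its loop even though no transport was
 * enqueued.
 */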

int svc_port_is_privileged(struct sockaddr *sin)
{
        switch (sin->sa_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)sin)->sin_port)
                        < PROT_SOCK;
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
                        < PROT_SOCK;
        default:
                return 0;
        }
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
        unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
                                (serv->sv_nrthreads+3) * 20;

        if (serv->sv_tmpcnt > limit) {
                struct svc_xprt *xprt = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        /* Try to help the admin */
                        net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
                                               serv->sv_name, serv->sv_maxconn ?
                                               "max number of connections" :
                                               "number of threads");
                        /*
                         * Always select the oldest connection. It's not fair,
                         * but so is life
                         */
                        xprt = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_xprt,
                                          xpt_list);
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_get(xprt);
                }
                spin_unlock_bh(&serv->sv_lock);

                if (xprt) {
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
        }
}
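
/*
 * Worked example of the default limit above (sv_maxconn unset): a
 * service running 8 threads tolerates (8 + 3) * 20 = 220 temporary
 * sockets before the oldest connection is queued for closing.
 */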

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct xdr_buf *arg;
        int pages;
        int i;

        /* now allocate needed pages. If we get a failure, sleep briefly */
        pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
        if (pages > RPCSVC_MAXPAGES) {
                pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
                             pages, RPCSVC_MAXPAGES);
                /* use as many pages as possible */
                pages = RPCSVC_MAXPAGES;
        }
        for (i = 0; i < pages; i++)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
                        if (!p) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (signalled() || kthread_should_stop()) {
                                        set_current_state(TASK_RUNNING);
                                        return -EINTR;
                                }
                                schedule_timeout(msecs_to_jiffies(500));
                        }
                        rqstp->rq_pages[i] = p;
                }
        rqstp->rq_page_end = &rqstp->rq_pages[i];
        rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        arg->pages = rqstp->rq_pages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;
        return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
        struct svc_pool *pool = rqstp->rq_pool;

        /* did someone call svc_wake_up? */
        if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
                return false;

        /* was a socket queued? */
        if (!list_empty(&pool->sp_sockets))
                return false;

        /* are we shutting down? */
        if (signalled() || kthread_should_stop())
                return false;

        /* are we freezing? */
        if (freezing(current))
                return false;

        return true;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
        struct svc_pool *pool = rqstp->rq_pool;
        long time_left = 0;

        /* rq_xprt should be clear on entry */
        WARN_ON_ONCE(rqstp->rq_xprt);

        rqstp->rq_xprt = svc_xprt_dequeue(pool);
        if (rqstp->rq_xprt)
                goto out_found;

        /*
         * We have to be able to interrupt this wait
         * to bring down the daemons ...
         */
        set_current_state(TASK_INTERRUPTIBLE);
        smp_mb__before_atomic();
        clear_bit(SP_CONGESTED, &pool->sp_flags);
        clear_bit(RQ_BUSY, &rqstp->rq_flags);
        smp_mb__after_atomic();

        if (likely(rqst_should_sleep(rqstp)))
                time_left = schedule_timeout(timeout);
        else
                __set_current_state(TASK_RUNNING);

        try_to_freeze();

        set_bit(RQ_BUSY, &rqstp->rq_flags);
        smp_mb__after_atomic();
        rqstp->rq_xprt = svc_xprt_dequeue(pool);
        if (rqstp->rq_xprt)
                goto out_found;

        if (!time_left)
                atomic_long_inc(&pool->sp_stats.threads_timedout);

        if (signalled() || kthread_should_stop())
                return ERR_PTR(-EINTR);
        return ERR_PTR(-EAGAIN);
out_found:
        /* Normally we will wait up to 5 seconds for any required
         * cache information to be provided.
         */
        if (!test_bit(SP_CONGESTED, &pool->sp_flags))
                rqstp->rq_chandle.thread_wait = 5*HZ;
        else
                rqstp->rq_chandle.thread_wait = 1*HZ;
        trace_svc_xprt_dequeue(rqstp);
        return rqstp->rq_xprt;
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
        spin_lock_bh(&serv->sv_lock);
        set_bit(XPT_TEMP, &newxpt->xpt_flags);
        list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
        serv->sv_tmpcnt++;
        if (serv->sv_temptimer.function == NULL) {
                /* setup timer to age temp transports */
                serv->sv_temptimer.function = svc_age_temp_xprts;
                mod_timer(&serv->sv_temptimer,
                          jiffies + svc_conn_age_period * HZ);
        }
        spin_unlock_bh(&serv->sv_lock);
        svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
        struct svc_serv *serv = rqstp->rq_server;
        int len = 0;

        if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                dprintk("svc_recv: found XPT_CLOSE\n");
                if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
                        xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
                svc_delete_xprt(xprt);
                /* Leave XPT_BUSY set on the dead xprt: */
                goto out;
        }
        if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
                struct svc_xprt *newxpt;
                /*
                 * We know this module_get will succeed because the
                 * listener holds a reference too
                 */
                __module_get(xprt->xpt_class->xcl_owner);
                svc_check_conn_limits(xprt->xpt_server);
                newxpt = xprt->xpt_ops->xpo_accept(xprt);
                if (newxpt) {
                        newxpt->xpt_cred = get_cred(xprt->xpt_cred);
                        svc_add_new_temp_xprt(serv, newxpt);
                } else
                        module_put(xprt->xpt_class->xcl_owner);
        } else if (svc_xprt_reserve_slot(rqstp, xprt)) {
                /* XPT_DATA|XPT_DEFERRED case: */
                dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
                        rqstp, rqstp->rq_pool->sp_id, xprt,
                        kref_read(&xprt->xpt_ref));
                rqstp->rq_deferred = svc_deferred_dequeue(xprt);
                if (rqstp->rq_deferred)
                        len = svc_deferred_recv(rqstp);
                else
                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                if (len > 0)
                        trace_svc_recvfrom(&rqstp->rq_arg);
                rqstp->rq_stime = ktime_get();
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
        }
        /* clear XPT_BUSY: */
        svc_xprt_received(xprt);
out:
        trace_svc_handle_xprt(xprt, len);
        return len;
}

/*
 * Receive the next request on any transport. This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
        struct svc_xprt *xprt = NULL;
        struct svc_serv *serv = rqstp->rq_server;
        int len, err;

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_xprt)
                printk(KERN_ERR
                        "svc_recv: service %p, transport not NULL!\n",
                        rqstp);

        err = svc_alloc_arg(rqstp);
        if (err)
                goto out;

        try_to_freeze();
        cond_resched();
        err = -EINTR;
        if (signalled() || kthread_should_stop())
                goto out;

        xprt = svc_get_next_xprt(rqstp, timeout);
        if (IS_ERR(xprt)) {
                err = PTR_ERR(xprt);
                goto out;
        }

        len = svc_handle_xprt(rqstp, xprt);

        /* No data, incomplete (TCP) read, or accept() */
        err = -EAGAIN;
        if (len <= 0)
                goto out_release;

        clear_bit(XPT_OLD, &xprt->xpt_flags);

        xprt->xpt_ops->xpo_secure_port(rqstp);
        rqstp->rq_chandle.defer = svc_defer;
        rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        trace_svc_recv(rqstp, len);
        return len;
out_release:
        rqstp->rq_res.len = 0;
        svc_xprt_release(rqstp);
out:
        return err;
}
EXPORT_SYMBOL_GPL(svc_recv);
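
/*
 * svc_recv() is the heart of a server thread's main loop. A minimal
 * sketch of such a loop (hypothetical; real services such as nfsd wrap
 * it in signal and shutdown handling):
 *
 *      while (!kthread_should_stop()) {
 *              err = svc_recv(rqstp, 60 * 60 * HZ);
 *              if (err == -EINTR)
 *                      break;
 *              if (err < 0)
 *                      continue;
 *              svc_process(rqstp);
 *      }
 */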

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
        trace_svc_drop(rqstp);
        dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
        svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt;
        int len = -EFAULT;
        struct xdr_buf *xb;

        xprt = rqstp->rq_xprt;
        if (!xprt)
                goto out;

        /* calculate over-all length */
        xb = &rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;
        trace_svc_sendto(xb);

        /* Grab mutex to serialize outgoing data. */
        mutex_lock(&xprt->xpt_mutex);
        trace_svc_stats_latency(rqstp);
        if (test_bit(XPT_DEAD, &xprt->xpt_flags)
                        || test_bit(XPT_CLOSE, &xprt->xpt_flags))
                len = -ENOTCONN;
        else
                len = xprt->xpt_ops->xpo_sendto(rqstp);
        mutex_unlock(&xprt->xpt_mutex);
        trace_svc_send(rqstp, len);
        svc_xprt_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                len = 0;
out:
        return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(struct timer_list *t)
{
        struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
        struct svc_xprt *xprt;
        struct list_head *le, *next;

        dprintk("svc_age_temp_xprts\n");

        if (!spin_trylock_bh(&serv->sv_lock)) {
                /* busy, try again 1 sec later */
                dprintk("svc_age_temp_xprts: busy\n");
                mod_timer(&serv->sv_temptimer, jiffies + HZ);
                return;
        }

        list_for_each_safe(le, next, &serv->sv_tempsocks) {
                xprt = list_entry(le, struct svc_xprt, xpt_list);

                /* First time through, just mark it OLD. Second time
                 * through, close it. */
                if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
                        continue;
                if (kref_read(&xprt->xpt_ref) > 1 ||
                    test_bit(XPT_BUSY, &xprt->xpt_flags))
                        continue;
                list_del_init(le);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                dprintk("queuing xprt %p for closing\n", xprt);

                /* a thread will dequeue and close it soon */
                svc_xprt_enqueue(xprt);
        }
        spin_unlock_bh(&serv->sv_lock);

        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
        struct svc_xprt *xprt;
        struct list_head *le, *next;
        LIST_HEAD(to_be_closed);

        spin_lock_bh(&serv->sv_lock);
        list_for_each_safe(le, next, &serv->sv_tempsocks) {
                xprt = list_entry(le, struct svc_xprt, xpt_list);
                if (rpc_cmp_addr(server_addr, (struct sockaddr *)
                                 &xprt->xpt_local)) {
                        dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
                        list_move(le, &to_be_closed);
                }
        }
        spin_unlock_bh(&serv->sv_lock);

        while (!list_empty(&to_be_closed)) {
                le = to_be_closed.next;
                list_del_init(le);
                xprt = list_entry(le, struct svc_xprt, xpt_list);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
                dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
                        xprt);
                svc_xprt_enqueue(xprt);
        }
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

static void call_xpt_users(struct svc_xprt *xprt)
{
        struct svc_xpt_user *u;

        spin_lock(&xprt->xpt_lock);
        while (!list_empty(&xprt->xpt_users)) {
                u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
                list_del_init(&u->list);
                u->callback(u);
        }
        spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
        struct svc_serv *serv = xprt->xpt_server;
        struct svc_deferred_req *dr;

        /* Only do this once */
        if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
                BUG();

        dprintk("svc: svc_delete_xprt(%p)\n", xprt);
        xprt->xpt_ops->xpo_detach(xprt);
        if (xprt->xpt_bc_xprt)
                xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);

        spin_lock_bh(&serv->sv_lock);
        list_del_init(&xprt->xpt_list);
        WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
        if (test_bit(XPT_TEMP, &xprt->xpt_flags))
                serv->sv_tmpcnt--;
        spin_unlock_bh(&serv->sv_lock);

        while ((dr = svc_deferred_dequeue(xprt)) != NULL)
                kfree(dr);

        call_xpt_users(xprt);
        svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                /* someone else will have to effect the close */
                return;
        /*
         * We expect svc_close_xprt() to work even when no threads are
         * running (e.g., while configuring the server before starting
         * any threads), so if the transport isn't busy, we delete
         * it ourself:
         */
        svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
        struct svc_xprt *xprt;
        int ret = 0;

        spin_lock(&serv->sv_lock);
        list_for_each_entry(xprt, xprt_list, xpt_list) {
                if (xprt->xpt_net != net)
                        continue;
                ret++;
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
        }
        spin_unlock(&serv->sv_lock);
        return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
        struct svc_pool *pool;
        struct svc_xprt *xprt;
        struct svc_xprt *tmp;
        int i;

        for (i = 0; i < serv->sv_nrpools; i++) {
                pool = &serv->sv_pools[i];

                spin_lock_bh(&pool->sp_lock);
                list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
                        if (xprt->xpt_net != net)
                                continue;
                        list_del_init(&xprt->xpt_ready);
                        spin_unlock_bh(&pool->sp_lock);
                        return xprt;
                }
                spin_unlock_bh(&pool->sp_lock);
        }
        return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
        struct svc_xprt *xprt;

        while ((xprt = svc_dequeue_net(serv, net))) {
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_delete_xprt(xprt);
        }
}
/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close. In the case where there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
        int delay = 0;

        while (svc_close_list(serv, &serv->sv_permsocks, net) +
               svc_close_list(serv, &serv->sv_tempsocks, net)) {

                svc_clean_up_xprts(serv, net);
                msleep(delay++);
        }
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct svc_deferred_req *dr =
                container_of(dreq, struct svc_deferred_req, handle);
        struct svc_xprt *xprt = dr->xprt;

        spin_lock(&xprt->xpt_lock);
        set_bit(XPT_DEFERRED, &xprt->xpt_flags);
        if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                spin_unlock(&xprt->xpt_lock);
                dprintk("revisit canceled\n");
                svc_xprt_put(xprt);
                trace_svc_drop_deferred(dr);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        dr->xprt = NULL;
        list_add(&dr->handle.recent, &xprt->xpt_deferred);
        spin_unlock(&xprt->xpt_lock);
        svc_xprt_enqueue(xprt);
        svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
        struct svc_deferred_req *dr;

        if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
                return NULL; /* if more than a page, give up FIXME */
        if (rqstp->rq_deferred) {
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
                size_t skip;
                size_t size;
                /* FIXME maybe discard if size too large */
                size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;

                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
                dr->addrlen = rqstp->rq_addrlen;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                dr->xprt_hlen = rqstp->rq_xprt_hlen;

                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
                       dr->argslen << 2);
        }
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
        set_bit(RQ_DROPME, &rqstp->rq_flags);

        dr->handle.revisit = svc_revisit;
        trace_svc_defer(rqstp);
        return &dr->handle;
}
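
/*
 * The cache code reaches svc_defer() through rqstp->rq_chandle: when a
 * lookup would otherwise block a thread, it calls req->defer(req)
 * (wired up to svc_defer() in svc_recv()), parks the request, and
 * later revisits it through dr->handle.revisit, i.e. svc_revisit().
 */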
1224
1225/*
1226 * recv data from a deferred request into an active one
1227 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}
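
/*
 * Worked example (illustrative numbers only): suppose the deferred
 * request was saved with rq_arg.len = 132 bytes, of which 4 bytes were
 * transport header, so dr->argslen = 33 words and dr->xprt_hlen = 4.
 * On replay, iov_base is set to dr->args + 1 (one 32-bit word past the
 * transport header), iov_len becomes 132 - 4 = 128, rq_arg.len is
 * restored to the full 132, and the function returns 128.
 */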

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		trace_svc_revisit_deferred(dr);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}
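
/*
 * Illustrative sketch (not part of the original file): how the receive
 * path pairs the two helpers above, roughly mirroring svc_handle_xprt().
 * A replayed request is consumed from the transport's deferral queue
 * before any fresh data is read. The function name is hypothetical.
 */
#if 0
static int example_recv(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	rqstp->rq_deferred = svc_deferred_dequeue(xprt);
	if (rqstp->rq_deferred)
		return svc_deferred_recv(rqstp);
	return xprt->xpt_ops->xpo_recvfrom(rqstp);
}
#endif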

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
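
/*
 * Illustrative sketch (not part of the original file): using the
 * wild-card semantics documented above to check whether a service has
 * any TCP listener, then dropping the reference that svc_find_xprt()
 * took. The helper name and the "tcp" class name are assumptions.
 */
#if 0
static bool example_has_tcp_listener(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
	if (!xprt)
		return false;
	svc_xprt_put(xprt);	/* release the reference taken above */
	return true;
}
#endif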

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns the positive length of the filled-in string on success, or
 * a negative errno value if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);
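
/*
 * Illustrative sketch (not part of the original file): formatting the
 * transport list into a fixed-size buffer, as a procfs-style handler
 * might. The helper name and buffer size are assumptions.
 */
#if 0
static int example_list_xprts(struct svc_serv *serv)
{
	char buf[256];
	int len;

	len = svc_xprt_names(serv, buf, sizeof(buf));
	if (len < 0)		/* -ENAMETOOLONG: buffer too small */
		return len;
	pr_info("%.*s", len, buf);	/* e.g. "tcp 2049\nudp 2049\n" */
	return 0;
}
#endif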

/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);

		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		   pool->sp_id,
		   (unsigned long)atomic_long_read(&pool->sp_stats.packets),
		   pool->sp_stats.sockets_queued,
		   (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		   (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}
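
/*
 * Example of the resulting seq_file output (values are illustrative):
 *
 * # pool packets-arrived sockets-enqueued threads-woken threads-timedout
 * 0 18283 15542 15541 42
 * 1 17992 15310 15309 37
 */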

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start = svc_pool_stats_start,
	.next = svc_pool_stats_next,
	.stop = svc_pool_stats_stop,
	.show = svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);
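
/*
 * Illustrative sketch (not part of the original file): wiring
 * svc_pool_stats_open() into a file_operations table, as a /proc
 * interface for a service might do. The "example_serv" pointer and all
 * example_* names are assumptions; plain seq_release() suffices here
 * because svc_pool_stats_open() takes no extra reference.
 */
#if 0
static struct svc_serv *example_serv;

static int example_pool_stats_open(struct inode *inode, struct file *file)
{
	return svc_pool_stats_open(example_serv, file);
}

static const struct file_operations example_pool_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= example_pool_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif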

/*----------------------------------------------------------------------------*/