Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/net/sunrpc/svcsock.c
3 *
4 * These are the RPC server socket internals.
5 *
6 * The server scheduling algorithm does not always distribute the load
7 * evenly when servicing a single client. May need to modify the
8 * svc_sock_enqueue procedure...
9 *
10 * TCP support is largely untested and may be a little slow. The problem
11 * is that we currently do two separate recvfrom's, one for the 4-byte
12 * record length, and the second for the actual record. This could possibly
13 * be improved by always reading a minimum size of around 100 bytes and
14 * tucking any superfluous bytes away in a temporary store. Still, that
15 * leaves write requests out in the rain. An alternative may be to peek at
16 * the first skb in the queue, and if it matches the next TCP sequence
17 * number, to extract the record marker. Yuck.
18 *
19 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
20 */
21
22#include <linux/sched.h>
23#include <linux/errno.h>
24#include <linux/fcntl.h>
25#include <linux/net.h>
26#include <linux/in.h>
27#include <linux/inet.h>
28#include <linux/udp.h>
Andrew Morton91483c42005-08-09 20:20:07 -070029#include <linux/tcp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/unistd.h>
31#include <linux/slab.h>
32#include <linux/netdevice.h>
33#include <linux/skbuff.h>
NeilBrownb41b66d2006-10-02 02:17:48 -070034#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <net/sock.h>
36#include <net/checksum.h>
37#include <net/ip.h>
Arnaldo Carvalho de Meloc752f072005-08-09 20:08:28 -070038#include <net/tcp_states.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <asm/uaccess.h>
40#include <asm/ioctls.h>
41
42#include <linux/sunrpc/types.h>
43#include <linux/sunrpc/xdr.h>
44#include <linux/sunrpc/svcsock.h>
45#include <linux/sunrpc/stats.h>
46
47/* SMP locking strategy:
48 *
Greg Banks3262c812006-10-02 02:17:58 -070049 * svc_pool->sp_lock protects most of the fields of that pool.
50 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
51 * when both need to be taken (rare), svc_serv->sv_lock is first.
52 * BKL protects svc_serv->sv_nrthread.
Greg Banks1a68d952006-10-02 02:17:55 -070053 * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
Greg Banksc081a0c2006-10-02 02:17:57 -070054 * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
Linus Torvalds1da177e2005-04-16 15:20:36 -070055 *
56 * Some flags can be set to certain values at any time
57 * providing that certain rules are followed:
58 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070059 * SK_CONN, SK_DATA, can be set or cleared at any time.
60 * after a set, svc_sock_enqueue must be called.
61 * after a clear, the socket must be read/accepted
62 * if this succeeds, it must be set again.
 63 * SK_CLOSE can be set at any time. It is never cleared.
64 *
65 */
66
67#define RPCDBG_FACILITY RPCDBG_SVCSOCK
68
69
70static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
71 int *errp, int pmap_reg);
72static void svc_udp_data_ready(struct sock *, int);
73static int svc_udp_recvfrom(struct svc_rqst *);
74static int svc_udp_sendto(struct svc_rqst *);
75
76static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
77static int svc_deferred_recv(struct svc_rqst *rqstp);
78static struct cache_deferred_req *svc_defer(struct cache_req *req);
79
Greg Banks36bdfc82006-10-02 02:17:54 -070080/* apparently the "standard" is that clients close
81 * idle connections after 5 minutes, servers after
82 * 6 minutes
83 * http://www.connectathon.org/talks96/nfstcp.pdf
84 */
85static int svc_conn_age_period = 6*60;
86
Linus Torvalds1da177e2005-04-16 15:20:36 -070087/*
Greg Banks3262c812006-10-02 02:17:58 -070088 * Queue up an idle server thread. Must have pool->sp_lock held.
Linus Torvalds1da177e2005-04-16 15:20:36 -070089 * Note: this is really a stack rather than a queue, so that we only
Greg Banks3262c812006-10-02 02:17:58 -070090 * use as many different threads as we need, and the rest don't pollute
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 * the cache.
92 */
93static inline void
Greg Banks3262c812006-10-02 02:17:58 -070094svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Greg Banks3262c812006-10-02 02:17:58 -070096 list_add(&rqstp->rq_list, &pool->sp_threads);
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99/*
Greg Banks3262c812006-10-02 02:17:58 -0700100 * Dequeue an nfsd thread. Must have pool->sp_lock held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101 */
102static inline void
Greg Banks3262c812006-10-02 02:17:58 -0700103svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
105 list_del(&rqstp->rq_list);
106}
107
108/*
109 * Release an skbuff after use
110 */
111static inline void
112svc_release_skb(struct svc_rqst *rqstp)
113{
114 struct sk_buff *skb = rqstp->rq_skbuff;
115 struct svc_deferred_req *dr = rqstp->rq_deferred;
116
117 if (skb) {
118 rqstp->rq_skbuff = NULL;
119
120 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
121 skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
122 }
123 if (dr) {
124 rqstp->rq_deferred = NULL;
125 kfree(dr);
126 }
127}
128
129/*
130 * Any space to write?
131 */
132static inline unsigned long
133svc_sock_wspace(struct svc_sock *svsk)
134{
135 int wspace;
136
137 if (svsk->sk_sock->type == SOCK_STREAM)
138 wspace = sk_stream_wspace(svsk->sk_sk);
139 else
140 wspace = sock_wspace(svsk->sk_sk);
141
142 return wspace;
143}
144
145/*
146 * Queue up a socket with data pending. If there are idle nfsd
147 * processes, wake 'em up.
148 *
149 */
150static void
151svc_sock_enqueue(struct svc_sock *svsk)
152{
153 struct svc_serv *serv = svsk->sk_server;
Greg Banks3262c812006-10-02 02:17:58 -0700154 struct svc_pool *pool = &serv->sv_pools[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 struct svc_rqst *rqstp;
156
157 if (!(svsk->sk_flags &
158 ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
159 return;
160 if (test_bit(SK_DEAD, &svsk->sk_flags))
161 return;
162
Greg Banks3262c812006-10-02 02:17:58 -0700163 spin_lock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164
Greg Banks3262c812006-10-02 02:17:58 -0700165 if (!list_empty(&pool->sp_threads) &&
166 !list_empty(&pool->sp_sockets))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167 printk(KERN_ERR
168 "svc_sock_enqueue: threads and sockets both waiting??\n");
169
170 if (test_bit(SK_DEAD, &svsk->sk_flags)) {
171 /* Don't enqueue dead sockets */
172 dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
173 goto out_unlock;
174 }
175
Greg Banksc081a0c2006-10-02 02:17:57 -0700176 /* Mark socket as busy. It will remain in this state until the
177 * server has processed all pending data and put the socket back
178 * on the idle list. We update SK_BUSY atomically because
179 * it also guards against trying to enqueue the svc_sock twice.
180 */
181 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
182 /* Don't enqueue socket while already enqueued */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
184 goto out_unlock;
185 }
Greg Banks3262c812006-10-02 02:17:58 -0700186 BUG_ON(svsk->sk_pool != NULL);
187 svsk->sk_pool = pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188
189 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
Greg Banks5685f0f2006-10-02 02:17:56 -0700190 if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 > svc_sock_wspace(svsk))
192 && !test_bit(SK_CLOSE, &svsk->sk_flags)
193 && !test_bit(SK_CONN, &svsk->sk_flags)) {
194 /* Don't enqueue while not enough space for reply */
195 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
Greg Banks5685f0f2006-10-02 02:17:56 -0700196 svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 svc_sock_wspace(svsk));
Greg Banks3262c812006-10-02 02:17:58 -0700198 svsk->sk_pool = NULL;
Greg Banksc081a0c2006-10-02 02:17:57 -0700199 clear_bit(SK_BUSY, &svsk->sk_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 goto out_unlock;
201 }
202 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
203
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
Greg Banks3262c812006-10-02 02:17:58 -0700205 if (!list_empty(&pool->sp_threads)) {
206 rqstp = list_entry(pool->sp_threads.next,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207 struct svc_rqst,
208 rq_list);
209 dprintk("svc: socket %p served by daemon %p\n",
210 svsk->sk_sk, rqstp);
Greg Banks3262c812006-10-02 02:17:58 -0700211 svc_thread_dequeue(pool, rqstp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 if (rqstp->rq_sock)
213 printk(KERN_ERR
214 "svc_sock_enqueue: server %p, rq_sock=%p!\n",
215 rqstp, rqstp->rq_sock);
216 rqstp->rq_sock = svsk;
Greg Banksc45c3572006-10-02 02:17:54 -0700217 atomic_inc(&svsk->sk_inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 rqstp->rq_reserved = serv->sv_bufsz;
Greg Banks5685f0f2006-10-02 02:17:56 -0700219 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
Greg Banks3262c812006-10-02 02:17:58 -0700220 BUG_ON(svsk->sk_pool != pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 wake_up(&rqstp->rq_wait);
222 } else {
223 dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
Greg Banks3262c812006-10-02 02:17:58 -0700224 list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
225 BUG_ON(svsk->sk_pool != pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226 }
227
228out_unlock:
Greg Banks3262c812006-10-02 02:17:58 -0700229 spin_unlock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230}
231
232/*
Greg Banks3262c812006-10-02 02:17:58 -0700233 * Dequeue the first socket. Must be called with the pool->sp_lock held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 */
235static inline struct svc_sock *
Greg Banks3262c812006-10-02 02:17:58 -0700236svc_sock_dequeue(struct svc_pool *pool)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237{
238 struct svc_sock *svsk;
239
Greg Banks3262c812006-10-02 02:17:58 -0700240 if (list_empty(&pool->sp_sockets))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 return NULL;
242
Greg Banks3262c812006-10-02 02:17:58 -0700243 svsk = list_entry(pool->sp_sockets.next,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 struct svc_sock, sk_ready);
245 list_del_init(&svsk->sk_ready);
246
247 dprintk("svc: socket %p dequeued, inuse=%d\n",
Greg Banksc45c3572006-10-02 02:17:54 -0700248 svsk->sk_sk, atomic_read(&svsk->sk_inuse));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249
250 return svsk;
251}
252
253/*
254 * Having read something from a socket, check whether it
255 * needs to be re-enqueued.
256 * Note: SK_DATA only gets cleared when a read-attempt finds
257 * no (or insufficient) data.
258 */
259static inline void
260svc_sock_received(struct svc_sock *svsk)
261{
Greg Banks3262c812006-10-02 02:17:58 -0700262 svsk->sk_pool = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 clear_bit(SK_BUSY, &svsk->sk_flags);
264 svc_sock_enqueue(svsk);
265}
266
267
268/**
269 * svc_reserve - change the space reserved for the reply to a request.
270 * @rqstp: The request in question
271 * @space: new max space to reserve
272 *
273 * Each request reserves some space on the output queue of the socket
274 * to make sure the reply fits. This function reduces that reserved
275 * space to be the amount of space used already, plus @space.
276 *
277 */
278void svc_reserve(struct svc_rqst *rqstp, int space)
279{
280 space += rqstp->rq_res.head[0].iov_len;
281
282 if (space < rqstp->rq_reserved) {
283 struct svc_sock *svsk = rqstp->rq_sock;
Greg Banks5685f0f2006-10-02 02:17:56 -0700284 atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285 rqstp->rq_reserved = space;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
287 svc_sock_enqueue(svsk);
288 }
289}
290
291/*
292 * Release a socket after use.
293 */
294static inline void
295svc_sock_put(struct svc_sock *svsk)
296{
Greg Banksc45c3572006-10-02 02:17:54 -0700297 if (atomic_dec_and_test(&svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 dprintk("svc: releasing dead socket\n");
299 sock_release(svsk->sk_sock);
300 kfree(svsk);
301 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302}
303
304static void
305svc_sock_release(struct svc_rqst *rqstp)
306{
307 struct svc_sock *svsk = rqstp->rq_sock;
308
309 svc_release_skb(rqstp);
310
311 svc_free_allpages(rqstp);
312 rqstp->rq_res.page_len = 0;
313 rqstp->rq_res.page_base = 0;
314
315
316 /* Reset response buffer and release
317 * the reservation.
318 * But first, check that enough space was reserved
319 * for the reply, otherwise we have a bug!
320 */
321 if ((rqstp->rq_res.len) > rqstp->rq_reserved)
322 printk(KERN_ERR "RPC request reserved %d but used %d\n",
323 rqstp->rq_reserved,
324 rqstp->rq_res.len);
325
326 rqstp->rq_res.head[0].iov_len = 0;
327 svc_reserve(rqstp, 0);
328 rqstp->rq_sock = NULL;
329
330 svc_sock_put(svsk);
331}
332
333/*
334 * External function to wake up a server waiting for data
Greg Banks3262c812006-10-02 02:17:58 -0700335 * This really only makes sense for services like lockd
336 * which have exactly one thread anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337 */
338void
339svc_wake_up(struct svc_serv *serv)
340{
341 struct svc_rqst *rqstp;
Greg Banks3262c812006-10-02 02:17:58 -0700342 unsigned int i;
343 struct svc_pool *pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
Greg Banks3262c812006-10-02 02:17:58 -0700345 for (i = 0; i < serv->sv_nrpools; i++) {
346 pool = &serv->sv_pools[i];
347
348 spin_lock_bh(&pool->sp_lock);
349 if (!list_empty(&pool->sp_threads)) {
350 rqstp = list_entry(pool->sp_threads.next,
351 struct svc_rqst,
352 rq_list);
353 dprintk("svc: daemon %p woken up.\n", rqstp);
354 /*
355 svc_thread_dequeue(pool, rqstp);
356 rqstp->rq_sock = NULL;
357 */
358 wake_up(&rqstp->rq_wait);
359 }
360 spin_unlock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362}
363
364/*
365 * Generic sendto routine
366 */
367static int
368svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
369{
370 struct svc_sock *svsk = rqstp->rq_sock;
371 struct socket *sock = svsk->sk_sock;
372 int slen;
373 char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
374 struct cmsghdr *cmh = (struct cmsghdr *)buffer;
375 struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
376 int len = 0;
377 int result;
378 int size;
379 struct page **ppage = xdr->pages;
380 size_t base = xdr->page_base;
381 unsigned int pglen = xdr->page_len;
382 unsigned int flags = MSG_MORE;
383
384 slen = xdr->len;
385
386 if (rqstp->rq_prot == IPPROTO_UDP) {
387 /* set the source and destination */
388 struct msghdr msg;
389 msg.msg_name = &rqstp->rq_addr;
390 msg.msg_namelen = sizeof(rqstp->rq_addr);
391 msg.msg_iov = NULL;
392 msg.msg_iovlen = 0;
393 msg.msg_flags = MSG_MORE;
394
395 msg.msg_control = cmh;
396 msg.msg_controllen = sizeof(buffer);
397 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
398 cmh->cmsg_level = SOL_IP;
399 cmh->cmsg_type = IP_PKTINFO;
400 pki->ipi_ifindex = 0;
401 pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;
402
403 if (sock_sendmsg(sock, &msg, 0) < 0)
404 goto out;
405 }
406
407 /* send head */
408 if (slen == xdr->head[0].iov_len)
409 flags = 0;
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700410 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 if (len != xdr->head[0].iov_len)
412 goto out;
413 slen -= xdr->head[0].iov_len;
414 if (slen == 0)
415 goto out;
416
417 /* send page data */
418 size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
419 while (pglen > 0) {
420 if (slen == size)
421 flags = 0;
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700422 result = kernel_sendpage(sock, *ppage, base, size, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 if (result > 0)
424 len += result;
425 if (result != size)
426 goto out;
427 slen -= size;
428 pglen -= size;
429 size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
430 base = 0;
431 ppage++;
432 }
433 /* send tail */
434 if (xdr->tail[0].iov_len) {
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700435 result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
437 xdr->tail[0].iov_len, 0);
438
439 if (result > 0)
440 len += result;
441 }
442out:
443 dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
444 rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
445 rqstp->rq_addr.sin_addr.s_addr);
446
447 return len;
448}
449
450/*
NeilBrown80212d52006-10-02 02:17:47 -0700451 * Report socket names for nfsdfs
452 */
453static int one_sock_name(char *buf, struct svc_sock *svsk)
454{
455 int len;
456
457 switch(svsk->sk_sk->sk_family) {
458 case AF_INET:
459 len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
460 svsk->sk_sk->sk_protocol==IPPROTO_UDP?
461 "udp" : "tcp",
462 NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
463 inet_sk(svsk->sk_sk)->num);
464 break;
465 default:
466 len = sprintf(buf, "*unknown-%d*\n",
467 svsk->sk_sk->sk_family);
468 }
469 return len;
470}
471
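/* Write one name per line, via one_sock_name() above, for each permanent
 * socket of @serv into @buf. If @toclose matches one of those names, that
 * socket is closed instead of being listed. Returns the number of bytes
 * written to @buf.
 */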
472int
NeilBrownb41b66d2006-10-02 02:17:48 -0700473svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
NeilBrown80212d52006-10-02 02:17:47 -0700474{
NeilBrownb41b66d2006-10-02 02:17:48 -0700475 struct svc_sock *svsk, *closesk = NULL;
NeilBrown80212d52006-10-02 02:17:47 -0700476 int len = 0;
477
478 if (!serv)
479 return 0;
480 spin_lock(&serv->sv_lock);
481 list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
482 int onelen = one_sock_name(buf+len, svsk);
NeilBrownb41b66d2006-10-02 02:17:48 -0700483 if (toclose && strcmp(toclose, buf+len) == 0)
484 closesk = svsk;
485 else
486 len += onelen;
NeilBrown80212d52006-10-02 02:17:47 -0700487 }
488 spin_unlock(&serv->sv_lock);
NeilBrownb41b66d2006-10-02 02:17:48 -0700489 if (closesk)
490 svc_delete_socket(closesk);
NeilBrown80212d52006-10-02 02:17:47 -0700491 return len;
492}
493EXPORT_SYMBOL(svc_sock_names);
494
495/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496 * Check input queue length
497 */
498static int
499svc_recv_available(struct svc_sock *svsk)
500{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 struct socket *sock = svsk->sk_sock;
502 int avail, err;
503
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700504 err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505
506 return (err >= 0)? avail : err;
507}
508
509/*
510 * Generic recvfrom routine.
511 */
512static int
513svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
514{
515 struct msghdr msg;
516 struct socket *sock;
517 int len, alen;
518
519 rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
520 sock = rqstp->rq_sock->sk_sock;
521
522 msg.msg_name = &rqstp->rq_addr;
523 msg.msg_namelen = sizeof(rqstp->rq_addr);
524 msg.msg_control = NULL;
525 msg.msg_controllen = 0;
526
527 msg.msg_flags = MSG_DONTWAIT;
528
529 len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);
530
531 /* sock_recvmsg doesn't fill in the name/namelen, so we must..
532 * possibly we should cache this in the svc_sock structure
533 * at accept time. FIXME
534 */
535 alen = sizeof(rqstp->rq_addr);
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700536 kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537
538 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
539 rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);
540
541 return len;
542}
543
544/*
545 * Set socket snd and rcv buffer lengths
546 */
547static inline void
548svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
549{
550#if 0
551 mm_segment_t oldfs;
552 oldfs = get_fs(); set_fs(KERNEL_DS);
553 sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
554 (char*)&snd, sizeof(snd));
555 sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
556 (char*)&rcv, sizeof(rcv));
557#else
558 /* sock_setsockopt limits use to sysctl_?mem_max,
559 * which isn't acceptable. Until that is made conditional
560 * on not having CAP_SYS_RESOURCE or similar, we go direct...
561 * DaveM said I could!
562 */
563 lock_sock(sock->sk);
564 sock->sk->sk_sndbuf = snd * 2;
565 sock->sk->sk_rcvbuf = rcv * 2;
566 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
567 release_sock(sock->sk);
568#endif
569}
570/*
571 * INET callback when data has been received on the socket.
572 */
573static void
574svc_udp_data_ready(struct sock *sk, int count)
575{
Neil Brown939bb7e2005-09-13 01:25:39 -0700576 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577
Neil Brown939bb7e2005-09-13 01:25:39 -0700578 if (svsk) {
579 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
580 svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
581 set_bit(SK_DATA, &svsk->sk_flags);
582 svc_sock_enqueue(svsk);
583 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
585 wake_up_interruptible(sk->sk_sleep);
586}
587
588/*
589 * INET callback when space is newly available on the socket.
590 */
591static void
592svc_write_space(struct sock *sk)
593{
594 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
595
596 if (svsk) {
597 dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
598 svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
599 svc_sock_enqueue(svsk);
600 }
601
602 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
Neil Brown939bb7e2005-09-13 01:25:39 -0700603 dprintk("RPC svc_write_space: someone sleeping on %p\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 svsk);
605 wake_up_interruptible(sk->sk_sleep);
606 }
607}
608
609/*
610 * Receive a datagram from a UDP socket.
611 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612static int
613svc_udp_recvfrom(struct svc_rqst *rqstp)
614{
615 struct svc_sock *svsk = rqstp->rq_sock;
616 struct svc_serv *serv = svsk->sk_server;
617 struct sk_buff *skb;
618 int err, len;
619
620 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
621 /* udp sockets need large rcvbuf as all pending
622 * requests are still in that buffer. sndbuf must
623 * also be large enough that there is enough space
Greg Banks3262c812006-10-02 02:17:58 -0700624 * for one reply per thread. We count all threads
625 * rather than threads in a particular pool, which
626 * provides an upper bound on the number of threads
627 * which will access the socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628 */
629 svc_sock_setbufsize(svsk->sk_sock,
630 (serv->sv_nrthreads+3) * serv->sv_bufsz,
631 (serv->sv_nrthreads+3) * serv->sv_bufsz);
632
633 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
634 svc_sock_received(svsk);
635 return svc_deferred_recv(rqstp);
636 }
637
638 clear_bit(SK_DATA, &svsk->sk_flags);
639 while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
640 if (err == -EAGAIN) {
641 svc_sock_received(svsk);
642 return err;
643 }
644 /* possibly an icmp error */
645 dprintk("svc: recvfrom returned error %d\n", -err);
646 }
Patrick McHardya61bbcf2005-08-14 17:24:31 -0700647 if (skb->tstamp.off_sec == 0) {
648 struct timeval tv;
649
650 tv.tv_sec = xtime.tv_sec;
Andrew Morton4bcde032005-10-26 01:59:03 -0700651 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
Patrick McHardya61bbcf2005-08-14 17:24:31 -0700652 skb_set_timestamp(skb, &tv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653 /* Don't enable netstamp, sunrpc doesn't
654 need that much accuracy */
655 }
Patrick McHardya61bbcf2005-08-14 17:24:31 -0700656 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
658
659 /*
660 * Maybe more packets - kick another thread ASAP.
661 */
662 svc_sock_received(svsk);
663
664 len = skb->len - sizeof(struct udphdr);
665 rqstp->rq_arg.len = len;
666
667 rqstp->rq_prot = IPPROTO_UDP;
668
669 /* Get sender address */
670 rqstp->rq_addr.sin_family = AF_INET;
671 rqstp->rq_addr.sin_port = skb->h.uh->source;
672 rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
673 rqstp->rq_daddr = skb->nh.iph->daddr;
674
675 if (skb_is_nonlinear(skb)) {
676 /* we have to copy */
677 local_bh_disable();
678 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
679 local_bh_enable();
680 /* checksum error */
681 skb_free_datagram(svsk->sk_sk, skb);
682 return 0;
683 }
684 local_bh_enable();
685 skb_free_datagram(svsk->sk_sk, skb);
686 } else {
687 /* we can use it in-place */
688 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
689 rqstp->rq_arg.head[0].iov_len = len;
Herbert Xufb286bb2005-11-10 13:01:24 -0800690 if (skb_checksum_complete(skb)) {
691 skb_free_datagram(svsk->sk_sk, skb);
692 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693 }
694 rqstp->rq_skbuff = skb;
695 }
696
697 rqstp->rq_arg.page_base = 0;
698 if (len <= rqstp->rq_arg.head[0].iov_len) {
699 rqstp->rq_arg.head[0].iov_len = len;
700 rqstp->rq_arg.page_len = 0;
701 } else {
702 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
703 rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
704 }
705
706 if (serv->sv_stats)
707 serv->sv_stats->netudpcnt++;
708
709 return len;
710}
711
712static int
713svc_udp_sendto(struct svc_rqst *rqstp)
714{
715 int error;
716
717 error = svc_sendto(rqstp, &rqstp->rq_res);
718 if (error == -ECONNREFUSED)
719 /* ICMP error on earlier request. */
720 error = svc_sendto(rqstp, &rqstp->rq_res);
721
722 return error;
723}
724
725static void
726svc_udp_init(struct svc_sock *svsk)
727{
728 svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
729 svsk->sk_sk->sk_write_space = svc_write_space;
730 svsk->sk_recvfrom = svc_udp_recvfrom;
731 svsk->sk_sendto = svc_udp_sendto;
732
 733 /* initial setting: must have enough space to
734 * receive and respond to one request.
735 * svc_udp_recvfrom will re-adjust if necessary
736 */
737 svc_sock_setbufsize(svsk->sk_sock,
738 3 * svsk->sk_server->sv_bufsz,
739 3 * svsk->sk_server->sv_bufsz);
740
741 set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
742 set_bit(SK_CHNGBUF, &svsk->sk_flags);
743}
744
745/*
746 * A data_ready event on a listening socket means there's a connection
747 * pending. Do not use state_change as a substitute for it.
748 */
749static void
750svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
751{
Neil Brown939bb7e2005-09-13 01:25:39 -0700752 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753
754 dprintk("svc: socket %p TCP (listen) state change %d\n",
Neil Brown939bb7e2005-09-13 01:25:39 -0700755 sk, sk->sk_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756
Neil Brown939bb7e2005-09-13 01:25:39 -0700757 /*
 758 * This callback may be called twice when a new connection
759 * is established as a child socket inherits everything
760 * from a parent LISTEN socket.
761 * 1) data_ready method of the parent socket will be called
 762 * when one of the child sockets becomes ESTABLISHED.
763 * 2) data_ready method of the child socket may be called
764 * when it receives data before the socket is accepted.
765 * In case of 2, we should ignore it silently.
766 */
767 if (sk->sk_state == TCP_LISTEN) {
768 if (svsk) {
769 set_bit(SK_CONN, &svsk->sk_flags);
770 svc_sock_enqueue(svsk);
771 } else
772 printk("svc: socket %p: no user data\n", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 }
Neil Brown939bb7e2005-09-13 01:25:39 -0700774
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
776 wake_up_interruptible_all(sk->sk_sleep);
777}
778
779/*
780 * A state change on a connected socket means it's dying or dead.
781 */
782static void
783svc_tcp_state_change(struct sock *sk)
784{
Neil Brown939bb7e2005-09-13 01:25:39 -0700785 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786
787 dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
Neil Brown939bb7e2005-09-13 01:25:39 -0700788 sk, sk->sk_state, sk->sk_user_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789
Neil Brown939bb7e2005-09-13 01:25:39 -0700790 if (!svsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 printk("svc: socket %p: no user data\n", sk);
Neil Brown939bb7e2005-09-13 01:25:39 -0700792 else {
793 set_bit(SK_CLOSE, &svsk->sk_flags);
794 svc_sock_enqueue(svsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
797 wake_up_interruptible_all(sk->sk_sleep);
798}
799
800static void
801svc_tcp_data_ready(struct sock *sk, int count)
802{
Neil Brown939bb7e2005-09-13 01:25:39 -0700803 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804
805 dprintk("svc: socket %p TCP data ready (svsk %p)\n",
Neil Brown939bb7e2005-09-13 01:25:39 -0700806 sk, sk->sk_user_data);
807 if (svsk) {
808 set_bit(SK_DATA, &svsk->sk_flags);
809 svc_sock_enqueue(svsk);
810 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
812 wake_up_interruptible(sk->sk_sleep);
813}
814
815/*
816 * Accept a TCP connection
817 */
818static void
819svc_tcp_accept(struct svc_sock *svsk)
820{
821 struct sockaddr_in sin;
822 struct svc_serv *serv = svsk->sk_server;
823 struct socket *sock = svsk->sk_sock;
824 struct socket *newsock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 struct svc_sock *newsvsk;
826 int err, slen;
827
828 dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
829 if (!sock)
830 return;
831
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700832 clear_bit(SK_CONN, &svsk->sk_flags);
833 err = kernel_accept(sock, &newsock, O_NONBLOCK);
834 if (err < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835 if (err == -ENOMEM)
836 printk(KERN_WARNING "%s: no more sockets!\n",
837 serv->sv_name);
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700838 else if (err != -EAGAIN && net_ratelimit())
839 printk(KERN_WARNING "%s: accept failed (err %d)!\n",
840 serv->sv_name, -err);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 return;
842 }
843
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 set_bit(SK_CONN, &svsk->sk_flags);
845 svc_sock_enqueue(svsk);
846
847 slen = sizeof(sin);
Sridhar Samudralae6242e92006-08-07 20:58:01 -0700848 err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 if (err < 0) {
850 if (net_ratelimit())
851 printk(KERN_WARNING "%s: peername failed (err %d)!\n",
852 serv->sv_name, -err);
853 goto failed; /* aborted connection or whatever */
854 }
855
856 /* Ideally, we would want to reject connections from unauthorized
 857 * hosts here, but when we get encryption, the IP of the host won't
858 * tell us anything. For now just warn about unpriv connections.
859 */
860 if (ntohs(sin.sin_port) >= 1024) {
861 dprintk(KERN_WARNING
862 "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
863 serv->sv_name,
864 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
865 }
866
867 dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
868 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
869
870 /* make sure that a write doesn't block forever when
871 * low on memory
872 */
873 newsock->sk->sk_sndtimeo = HZ*30;
874
875 if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
876 goto failed;
877
878
879 /* make sure that we don't have too many active connections.
 880 * If we do, something must be dropped.
881 *
882 * There's no point in trying to do random drop here for
 883 * DoS prevention. NFS clients do 1 reconnect in 15
884 * seconds. An attacker can easily beat that.
885 *
 886 * The only somewhat efficient mechanism would be to drop
887 * old connections from the same IP first. But right now
888 * we don't even record the client IP in svc_sock.
889 */
890 if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
891 struct svc_sock *svsk = NULL;
892 spin_lock_bh(&serv->sv_lock);
893 if (!list_empty(&serv->sv_tempsocks)) {
894 if (net_ratelimit()) {
895 /* Try to help the admin */
896 printk(KERN_NOTICE "%s: too many open TCP "
897 "sockets, consider increasing the "
898 "number of nfsd threads\n",
899 serv->sv_name);
900 printk(KERN_NOTICE "%s: last TCP connect from "
901 "%u.%u.%u.%u:%d\n",
902 serv->sv_name,
903 NIPQUAD(sin.sin_addr.s_addr),
904 ntohs(sin.sin_port));
905 }
906 /*
907 * Always select the oldest socket. It's not fair,
908 * but so is life
909 */
910 svsk = list_entry(serv->sv_tempsocks.prev,
911 struct svc_sock,
912 sk_list);
913 set_bit(SK_CLOSE, &svsk->sk_flags);
Greg Banksc45c3572006-10-02 02:17:54 -0700914 atomic_inc(&svsk->sk_inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915 }
916 spin_unlock_bh(&serv->sv_lock);
917
918 if (svsk) {
919 svc_sock_enqueue(svsk);
920 svc_sock_put(svsk);
921 }
922
923 }
924
925 if (serv->sv_stats)
926 serv->sv_stats->nettcpconn++;
927
928 return;
929
930failed:
931 sock_release(newsock);
932 return;
933}
934
935/*
936 * Receive data from a TCP socket.
937 */
938static int
939svc_tcp_recvfrom(struct svc_rqst *rqstp)
940{
941 struct svc_sock *svsk = rqstp->rq_sock;
942 struct svc_serv *serv = svsk->sk_server;
943 int len;
944 struct kvec vec[RPCSVC_MAXPAGES];
945 int pnum, vlen;
946
947 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
948 svsk, test_bit(SK_DATA, &svsk->sk_flags),
949 test_bit(SK_CONN, &svsk->sk_flags),
950 test_bit(SK_CLOSE, &svsk->sk_flags));
951
952 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
953 svc_sock_received(svsk);
954 return svc_deferred_recv(rqstp);
955 }
956
957 if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
958 svc_delete_socket(svsk);
959 return 0;
960 }
961
962 if (test_bit(SK_CONN, &svsk->sk_flags)) {
963 svc_tcp_accept(svsk);
964 svc_sock_received(svsk);
965 return 0;
966 }
967
968 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
969 /* sndbuf needs to have room for one request
970 * per thread, otherwise we can stall even when the
971 * network isn't a bottleneck.
Greg Banks3262c812006-10-02 02:17:58 -0700972 *
973 * We count all threads rather than threads in a
974 * particular pool, which provides an upper bound
975 * on the number of threads which will access the socket.
976 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 * rcvbuf just needs to be able to hold a few requests.
978 * Normally they will be removed from the queue
 979 * as soon as a complete request arrives.
980 */
981 svc_sock_setbufsize(svsk->sk_sock,
982 (serv->sv_nrthreads+3) * serv->sv_bufsz,
983 3 * serv->sv_bufsz);
984
985 clear_bit(SK_DATA, &svsk->sk_flags);
986
987 /* Receive data. If we haven't got the record length yet, get
988 * the next four bytes. Otherwise try to gobble up as much as
989 * possible up to the complete record length.
990 */
991 if (svsk->sk_tcplen < 4) {
992 unsigned long want = 4 - svsk->sk_tcplen;
993 struct kvec iov;
994
995 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
996 iov.iov_len = want;
997 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
998 goto error;
999 svsk->sk_tcplen += len;
1000
1001 if (len < want) {
1002 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
1003 len, want);
1004 svc_sock_received(svsk);
1005 return -EAGAIN; /* record header not complete */
1006 }
1007
1008 svsk->sk_reclen = ntohl(svsk->sk_reclen);
1009 if (!(svsk->sk_reclen & 0x80000000)) {
1010 /* FIXME: technically, a record can be fragmented,
1011 * and non-terminal fragments will not have the top
1012 * bit set in the fragment length header.
1013 * But apparently no known nfs clients send fragmented
1014 * records. */
1015 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
1016 (unsigned long) svsk->sk_reclen);
1017 goto err_delete;
1018 }
1019 svsk->sk_reclen &= 0x7fffffff;
1020 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
1021 if (svsk->sk_reclen > serv->sv_bufsz) {
1022 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
1023 (unsigned long) svsk->sk_reclen);
1024 goto err_delete;
1025 }
1026 }
1027
1028 /* Check whether enough data is available */
1029 len = svc_recv_available(svsk);
1030 if (len < 0)
1031 goto error;
1032
1033 if (len < svsk->sk_reclen) {
1034 dprintk("svc: incomplete TCP record (%d of %d)\n",
1035 len, svsk->sk_reclen);
1036 svc_sock_received(svsk);
1037 return -EAGAIN; /* record not complete */
1038 }
1039 len = svsk->sk_reclen;
1040 set_bit(SK_DATA, &svsk->sk_flags);
1041
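	/* Build a kvec covering the head buffer plus as many whole argument
	 * pages as are needed to hold the complete record.
	 */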
1042 vec[0] = rqstp->rq_arg.head[0];
1043 vlen = PAGE_SIZE;
1044 pnum = 1;
1045 while (vlen < len) {
1046 vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
1047 vec[pnum].iov_len = PAGE_SIZE;
1048 pnum++;
1049 vlen += PAGE_SIZE;
1050 }
1051
1052 /* Now receive data */
1053 len = svc_recvfrom(rqstp, vec, pnum, len);
1054 if (len < 0)
1055 goto error;
1056
1057 dprintk("svc: TCP complete record (%d bytes)\n", len);
1058 rqstp->rq_arg.len = len;
1059 rqstp->rq_arg.page_base = 0;
1060 if (len <= rqstp->rq_arg.head[0].iov_len) {
1061 rqstp->rq_arg.head[0].iov_len = len;
1062 rqstp->rq_arg.page_len = 0;
1063 } else {
1064 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
1065 }
1066
1067 rqstp->rq_skbuff = NULL;
1068 rqstp->rq_prot = IPPROTO_TCP;
1069
1070 /* Reset TCP read info */
1071 svsk->sk_reclen = 0;
1072 svsk->sk_tcplen = 0;
1073
1074 svc_sock_received(svsk);
1075 if (serv->sv_stats)
1076 serv->sv_stats->nettcpcnt++;
1077
1078 return len;
1079
1080 err_delete:
1081 svc_delete_socket(svsk);
1082 return -EAGAIN;
1083
1084 error:
1085 if (len == -EAGAIN) {
1086 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1087 svc_sock_received(svsk);
1088 } else {
1089 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1090 svsk->sk_server->sv_name, -len);
Olaf Kirch93fbf1a2006-01-06 00:19:56 -08001091 goto err_delete;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 }
1093
1094 return len;
1095}
1096
1097/*
1098 * Send out data on TCP socket.
1099 */
1100static int
1101svc_tcp_sendto(struct svc_rqst *rqstp)
1102{
1103 struct xdr_buf *xbufp = &rqstp->rq_res;
1104 int sent;
Alexey Dobriyand8ed0292006-09-26 22:29:38 -07001105 __be32 reclen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106
1107 /* Set up the first element of the reply kvec.
1108 * Any other kvecs that may be in use have been taken
1109 * care of by the server implementation itself.
1110 */
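	/* RPC over TCP uses standard record marking: the most-significant bit
	 * of the 4-byte marker flags the last fragment and the low 31 bits
	 * give the fragment length, which does not include the marker itself.
	 */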
1111 reclen = htonl(0x80000000|((xbufp->len ) - 4));
1112 memcpy(xbufp->head[0].iov_base, &reclen, 4);
1113
1114 if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
1115 return -ENOTCONN;
1116
1117 sent = svc_sendto(rqstp, &rqstp->rq_res);
1118 if (sent != xbufp->len) {
1119 printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
1120 rqstp->rq_sock->sk_server->sv_name,
1121 (sent<0)?"got error":"sent only",
1122 sent, xbufp->len);
1123 svc_delete_socket(rqstp->rq_sock);
1124 sent = -EAGAIN;
1125 }
1126 return sent;
1127}
1128
1129static void
1130svc_tcp_init(struct svc_sock *svsk)
1131{
1132 struct sock *sk = svsk->sk_sk;
1133 struct tcp_sock *tp = tcp_sk(sk);
1134
1135 svsk->sk_recvfrom = svc_tcp_recvfrom;
1136 svsk->sk_sendto = svc_tcp_sendto;
1137
1138 if (sk->sk_state == TCP_LISTEN) {
1139 dprintk("setting up TCP socket for listening\n");
1140 sk->sk_data_ready = svc_tcp_listen_data_ready;
1141 set_bit(SK_CONN, &svsk->sk_flags);
1142 } else {
1143 dprintk("setting up TCP socket for reading\n");
1144 sk->sk_state_change = svc_tcp_state_change;
1145 sk->sk_data_ready = svc_tcp_data_ready;
1146 sk->sk_write_space = svc_write_space;
1147
1148 svsk->sk_reclen = 0;
1149 svsk->sk_tcplen = 0;
1150
1151 tp->nonagle = 1; /* disable Nagle's algorithm */
1152
 1153 /* initial setting: must have enough space to
1154 * receive and respond to one request.
1155 * svc_tcp_recvfrom will re-adjust if necessary
1156 */
1157 svc_sock_setbufsize(svsk->sk_sock,
1158 3 * svsk->sk_server->sv_bufsz,
1159 3 * svsk->sk_server->sv_bufsz);
1160
1161 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1162 set_bit(SK_DATA, &svsk->sk_flags);
1163 if (sk->sk_state != TCP_ESTABLISHED)
1164 set_bit(SK_CLOSE, &svsk->sk_flags);
1165 }
1166}
1167
1168void
1169svc_sock_update_bufs(struct svc_serv *serv)
1170{
1171 /*
1172 * The number of server threads has changed. Update
1173 * rcvbuf and sndbuf accordingly on all sockets
1174 */
1175 struct list_head *le;
1176
1177 spin_lock_bh(&serv->sv_lock);
1178 list_for_each(le, &serv->sv_permsocks) {
1179 struct svc_sock *svsk =
1180 list_entry(le, struct svc_sock, sk_list);
1181 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1182 }
1183 list_for_each(le, &serv->sv_tempsocks) {
1184 struct svc_sock *svsk =
1185 list_entry(le, struct svc_sock, sk_list);
1186 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1187 }
1188 spin_unlock_bh(&serv->sv_lock);
1189}
1190
1191/*
Greg Banks3262c812006-10-02 02:17:58 -07001192 * Receive the next request on any socket. This code is carefully
1193 * organised not to touch any cachelines in the shared svc_serv
1194 * structure, only cachelines in the local svc_pool.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 */
1196int
NeilBrown6fb2b472006-10-02 02:17:50 -07001197svc_recv(struct svc_rqst *rqstp, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198{
1199 struct svc_sock *svsk =NULL;
NeilBrown6fb2b472006-10-02 02:17:50 -07001200 struct svc_serv *serv = rqstp->rq_server;
Greg Banks3262c812006-10-02 02:17:58 -07001201 struct svc_pool *pool = rqstp->rq_pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 int len;
1203 int pages;
1204 struct xdr_buf *arg;
1205 DECLARE_WAITQUEUE(wait, current);
1206
1207 dprintk("svc: server %p waiting for data (to = %ld)\n",
1208 rqstp, timeout);
1209
1210 if (rqstp->rq_sock)
1211 printk(KERN_ERR
1212 "svc_recv: service %p, socket not NULL!\n",
1213 rqstp);
1214 if (waitqueue_active(&rqstp->rq_wait))
1215 printk(KERN_ERR
1216 "svc_recv: service %p, wait queue active!\n",
1217 rqstp);
1218
1219 /* Initialize the buffers */
1220 /* first reclaim pages that were moved to response list */
1221 svc_pushback_allpages(rqstp);
1222
1223 /* now allocate needed pages. If we get a failure, sleep briefly */
1224 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
1225 while (rqstp->rq_arghi < pages) {
1226 struct page *p = alloc_page(GFP_KERNEL);
1227 if (!p) {
Nishanth Aravamudan121caf52005-09-12 14:15:34 -07001228 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 continue;
1230 }
1231 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1232 }
1233
1234 /* Make arg->head point to first page and arg->pages point to rest */
1235 arg = &rqstp->rq_arg;
1236 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
1237 arg->head[0].iov_len = PAGE_SIZE;
1238 rqstp->rq_argused = 1;
1239 arg->pages = rqstp->rq_argpages + 1;
1240 arg->page_base = 0;
1241 /* save at least one page for response */
1242 arg->page_len = (pages-2)*PAGE_SIZE;
1243 arg->len = (pages-1)*PAGE_SIZE;
1244 arg->tail[0].iov_len = 0;
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07001245
1246 try_to_freeze();
NeilBrown1887b932005-11-15 00:09:10 -08001247 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 if (signalled())
1249 return -EINTR;
1250
Greg Banks3262c812006-10-02 02:17:58 -07001251 spin_lock_bh(&pool->sp_lock);
1252 if ((svsk = svc_sock_dequeue(pool)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 rqstp->rq_sock = svsk;
Greg Banksc45c3572006-10-02 02:17:54 -07001254 atomic_inc(&svsk->sk_inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 rqstp->rq_reserved = serv->sv_bufsz;
Greg Banks5685f0f2006-10-02 02:17:56 -07001256 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 } else {
1258 /* No data pending. Go to sleep */
Greg Banks3262c812006-10-02 02:17:58 -07001259 svc_thread_enqueue(pool, rqstp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
1261 /*
1262 * We have to be able to interrupt this wait
1263 * to bring down the daemons ...
1264 */
1265 set_current_state(TASK_INTERRUPTIBLE);
1266 add_wait_queue(&rqstp->rq_wait, &wait);
Greg Banks3262c812006-10-02 02:17:58 -07001267 spin_unlock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
1269 schedule_timeout(timeout);
1270
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07001271 try_to_freeze();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Greg Banks3262c812006-10-02 02:17:58 -07001273 spin_lock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 remove_wait_queue(&rqstp->rq_wait, &wait);
1275
1276 if (!(svsk = rqstp->rq_sock)) {
Greg Banks3262c812006-10-02 02:17:58 -07001277 svc_thread_dequeue(pool, rqstp);
1278 spin_unlock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 dprintk("svc: server %p, no data yet\n", rqstp);
1280 return signalled()? -EINTR : -EAGAIN;
1281 }
1282 }
Greg Banks3262c812006-10-02 02:17:58 -07001283 spin_unlock_bh(&pool->sp_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
Greg Banks3262c812006-10-02 02:17:58 -07001285 dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
1286 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 len = svsk->sk_recvfrom(rqstp);
1288 dprintk("svc: got len=%d\n", len);
1289
1290 /* No data, incomplete (TCP) read, or accept() */
1291 if (len == 0 || len == -EAGAIN) {
1292 rqstp->rq_res.len = 0;
1293 svc_sock_release(rqstp);
1294 return -EAGAIN;
1295 }
1296 svsk->sk_lastrecv = get_seconds();
Greg Banks36bdfc82006-10-02 02:17:54 -07001297 clear_bit(SK_OLD, &svsk->sk_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298
1299 rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
1300 rqstp->rq_chandle.defer = svc_defer;
1301
1302 if (serv->sv_stats)
1303 serv->sv_stats->netcnt++;
1304 return len;
1305}
1306
1307/*
1308 * Drop request
1309 */
1310void
1311svc_drop(struct svc_rqst *rqstp)
1312{
1313 dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
1314 svc_sock_release(rqstp);
1315}
1316
1317/*
1318 * Return reply to client.
1319 */
1320int
1321svc_send(struct svc_rqst *rqstp)
1322{
1323 struct svc_sock *svsk;
1324 int len;
1325 struct xdr_buf *xb;
1326
1327 if ((svsk = rqstp->rq_sock) == NULL) {
1328 printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
1329 __FILE__, __LINE__);
1330 return -EFAULT;
1331 }
1332
1333 /* release the receive skb before sending the reply */
1334 svc_release_skb(rqstp);
1335
1336 /* calculate over-all length */
1337 xb = & rqstp->rq_res;
1338 xb->len = xb->head[0].iov_len +
1339 xb->page_len +
1340 xb->tail[0].iov_len;
1341
Ingo Molnar57b47a52006-03-20 22:35:41 -08001342 /* Grab svsk->sk_mutex to serialize outgoing data. */
1343 mutex_lock(&svsk->sk_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 if (test_bit(SK_DEAD, &svsk->sk_flags))
1345 len = -ENOTCONN;
1346 else
1347 len = svsk->sk_sendto(rqstp);
Ingo Molnar57b47a52006-03-20 22:35:41 -08001348 mutex_unlock(&svsk->sk_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 svc_sock_release(rqstp);
1350
1351 if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
1352 return 0;
1353 return len;
1354}
1355
1356/*
Greg Banks36bdfc82006-10-02 02:17:54 -07001357 * Timer function to close old temporary sockets, using
1358 * a mark-and-sweep algorithm.
1359 */
1360static void
1361svc_age_temp_sockets(unsigned long closure)
1362{
1363 struct svc_serv *serv = (struct svc_serv *)closure;
1364 struct svc_sock *svsk;
1365 struct list_head *le, *next;
1366 LIST_HEAD(to_be_aged);
1367
1368 dprintk("svc_age_temp_sockets\n");
1369
1370 if (!spin_trylock_bh(&serv->sv_lock)) {
1371 /* busy, try again 1 sec later */
1372 dprintk("svc_age_temp_sockets: busy\n");
1373 mod_timer(&serv->sv_temptimer, jiffies + HZ);
1374 return;
1375 }
1376
1377 list_for_each_safe(le, next, &serv->sv_tempsocks) {
1378 svsk = list_entry(le, struct svc_sock, sk_list);
1379
1380 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
1381 continue;
Greg Banksc45c3572006-10-02 02:17:54 -07001382 if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
Greg Banks36bdfc82006-10-02 02:17:54 -07001383 continue;
Greg Banksc45c3572006-10-02 02:17:54 -07001384 atomic_inc(&svsk->sk_inuse);
Greg Banks36bdfc82006-10-02 02:17:54 -07001385 list_move(le, &to_be_aged);
1386 set_bit(SK_CLOSE, &svsk->sk_flags);
1387 set_bit(SK_DETACHED, &svsk->sk_flags);
1388 }
1389 spin_unlock_bh(&serv->sv_lock);
1390
1391 while (!list_empty(&to_be_aged)) {
1392 le = to_be_aged.next;
1393 /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
1394 list_del_init(le);
1395 svsk = list_entry(le, struct svc_sock, sk_list);
1396
1397 dprintk("queuing svsk %p for closing, %lu seconds old\n",
1398 svsk, get_seconds() - svsk->sk_lastrecv);
1399
1400 /* a thread will dequeue and close it soon */
1401 svc_sock_enqueue(svsk);
1402 svc_sock_put(svsk);
1403 }
1404
1405 mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
1406}
1407
1408/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 * Initialize socket for RPC use and create svc_sock struct
1410 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
1411 */
1412static struct svc_sock *
1413svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1414 int *errp, int pmap_register)
1415{
1416 struct svc_sock *svsk;
1417 struct sock *inet;
1418
1419 dprintk("svc: svc_setup_socket %p\n", sock);
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07001420 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 *errp = -ENOMEM;
1422 return NULL;
1423 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
1425 inet = sock->sk;
1426
1427 /* Register socket with portmapper */
1428 if (*errp >= 0 && pmap_register)
1429 *errp = svc_register(serv, inet->sk_protocol,
1430 ntohs(inet_sk(inet)->sport));
1431
1432 if (*errp < 0) {
1433 kfree(svsk);
1434 return NULL;
1435 }
1436
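	/* Start the socket off as busy so it cannot be enqueued until setup
	 * is complete; SK_BUSY is cleared again just before the first
	 * svc_sock_enqueue() at the end of this function.
	 */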
1437 set_bit(SK_BUSY, &svsk->sk_flags);
1438 inet->sk_user_data = svsk;
1439 svsk->sk_sock = sock;
1440 svsk->sk_sk = inet;
1441 svsk->sk_ostate = inet->sk_state_change;
1442 svsk->sk_odata = inet->sk_data_ready;
1443 svsk->sk_owspace = inet->sk_write_space;
1444 svsk->sk_server = serv;
Greg Banksc45c3572006-10-02 02:17:54 -07001445 atomic_set(&svsk->sk_inuse, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 svsk->sk_lastrecv = get_seconds();
Greg Banks1a68d952006-10-02 02:17:55 -07001447 spin_lock_init(&svsk->sk_defer_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 INIT_LIST_HEAD(&svsk->sk_deferred);
1449 INIT_LIST_HEAD(&svsk->sk_ready);
Ingo Molnar57b47a52006-03-20 22:35:41 -08001450 mutex_init(&svsk->sk_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
1452 /* Initialize the socket */
1453 if (sock->type == SOCK_DGRAM)
1454 svc_udp_init(svsk);
1455 else
1456 svc_tcp_init(svsk);
1457
1458 spin_lock_bh(&serv->sv_lock);
1459 if (!pmap_register) {
1460 set_bit(SK_TEMP, &svsk->sk_flags);
1461 list_add(&svsk->sk_list, &serv->sv_tempsocks);
1462 serv->sv_tmpcnt++;
Greg Banks36bdfc82006-10-02 02:17:54 -07001463 if (serv->sv_temptimer.function == NULL) {
1464 /* setup timer to age temp sockets */
1465 setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
1466 (unsigned long)serv);
1467 mod_timer(&serv->sv_temptimer,
1468 jiffies + svc_conn_age_period * HZ);
1469 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 } else {
1471 clear_bit(SK_TEMP, &svsk->sk_flags);
1472 list_add(&svsk->sk_list, &serv->sv_permsocks);
1473 }
1474 spin_unlock_bh(&serv->sv_lock);
1475
1476 dprintk("svc: svc_setup_socket created %p (inet %p)\n",
1477 svsk, svsk->sk_sk);
1478
1479 clear_bit(SK_BUSY, &svsk->sk_flags);
1480 svc_sock_enqueue(svsk);
1481 return svsk;
1482}
1483
NeilBrownb41b66d2006-10-02 02:17:48 -07001484int svc_addsock(struct svc_serv *serv,
1485 int fd,
1486 char *name_return,
1487 int *proto)
1488{
1489 int err = 0;
1490 struct socket *so = sockfd_lookup(fd, &err);
1491 struct svc_sock *svsk = NULL;
1492
1493 if (!so)
1494 return err;
1495 if (so->sk->sk_family != AF_INET)
1496 err = -EAFNOSUPPORT;
1497 else if (so->sk->sk_protocol != IPPROTO_TCP &&
1498 so->sk->sk_protocol != IPPROTO_UDP)
1499 err = -EPROTONOSUPPORT;
1500 else if (so->state > SS_UNCONNECTED)
1501 err = -EISCONN;
1502 else {
1503 svsk = svc_setup_socket(serv, so, &err, 1);
1504 if (svsk)
1505 err = 0;
1506 }
1507 if (err) {
1508 sockfd_put(so);
1509 return err;
1510 }
1511 if (proto) *proto = so->sk->sk_protocol;
1512 return one_sock_name(name_return, svsk);
1513}
1514EXPORT_SYMBOL_GPL(svc_addsock);
1515
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516/*
1517 * Create socket for RPC service.
1518 */
1519static int
1520svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
1521{
1522 struct svc_sock *svsk;
1523 struct socket *sock;
1524 int error;
1525 int type;
1526
1527 dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
1528 serv->sv_program->pg_name, protocol,
1529 NIPQUAD(sin->sin_addr.s_addr),
1530 ntohs(sin->sin_port));
1531
1532 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
1533 printk(KERN_WARNING "svc: only UDP and TCP "
1534 "sockets supported\n");
1535 return -EINVAL;
1536 }
1537 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
1538
1539 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
1540 return error;
1541
Eric Sesterhenn18114742006-09-28 14:37:07 -07001542 if (type == SOCK_STREAM)
1543 sock->sk->sk_reuse = 1; /* allow address reuse */
1544 error = kernel_bind(sock, (struct sockaddr *) sin,
1545 sizeof(*sin));
1546 if (error < 0)
1547 goto bummer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
1549 if (protocol == IPPROTO_TCP) {
Sridhar Samudralae6242e92006-08-07 20:58:01 -07001550 if ((error = kernel_listen(sock, 64)) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 goto bummer;
1552 }
1553
1554 if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
1555 return 0;
1556
1557bummer:
1558 dprintk("svc: svc_create_socket error = %d\n", -error);
1559 sock_release(sock);
1560 return error;
1561}
1562
1563/*
1564 * Remove a dead socket
1565 */
1566void
1567svc_delete_socket(struct svc_sock *svsk)
1568{
1569 struct svc_serv *serv;
1570 struct sock *sk;
1571
1572 dprintk("svc: svc_delete_socket(%p)\n", svsk);
1573
1574 serv = svsk->sk_server;
1575 sk = svsk->sk_sk;
1576
1577 sk->sk_state_change = svsk->sk_ostate;
1578 sk->sk_data_ready = svsk->sk_odata;
1579 sk->sk_write_space = svsk->sk_owspace;
1580
1581 spin_lock_bh(&serv->sv_lock);
1582
Greg Banks36bdfc82006-10-02 02:17:54 -07001583 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1584 list_del_init(&svsk->sk_list);
Greg Banks3262c812006-10-02 02:17:58 -07001585 /*
1586 * We used to delete the svc_sock from whichever list
 1587 * its sk_ready node was on, but we don't actually
1588 * need to. This is because the only time we're called
1589 * while still attached to a queue, the queue itself
1590 * is about to be destroyed (in svc_destroy).
1591 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
1593 if (test_bit(SK_TEMP, &svsk->sk_flags))
1594 serv->sv_tmpcnt--;
1595
Greg Banksc45c3572006-10-02 02:17:54 -07001596 if (!atomic_read(&svsk->sk_inuse)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 spin_unlock_bh(&serv->sv_lock);
NeilBrownb41b66d2006-10-02 02:17:48 -07001598 if (svsk->sk_sock->file)
1599 sockfd_put(svsk->sk_sock);
1600 else
1601 sock_release(svsk->sk_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 kfree(svsk);
1603 } else {
1604 spin_unlock_bh(&serv->sv_lock);
1605 dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
1606 /* svsk->sk_server = NULL; */
1607 }
1608}
1609
1610/*
1611 * Make a socket for nfsd and lockd
1612 */
1613int
1614svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
1615{
1616 struct sockaddr_in sin;
1617
1618 dprintk("svc: creating socket proto = %d\n", protocol);
1619 sin.sin_family = AF_INET;
1620 sin.sin_addr.s_addr = INADDR_ANY;
1621 sin.sin_port = htons(port);
1622 return svc_create_socket(serv, protocol, &sin);
1623}
1624
1625/*
1626 * Handle defer and revisit of requests
1627 */
1628
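/* Cache callback invoked when a deferred request should be revisited:
 * either drop it (@too_many), or put it back on its socket's deferred
 * list and re-enqueue the socket so a server thread picks it up again.
 */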
1629static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1630{
1631 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 struct svc_sock *svsk;
1633
1634 if (too_many) {
1635 svc_sock_put(dr->svsk);
1636 kfree(dr);
1637 return;
1638 }
1639 dprintk("revisit queued\n");
1640 svsk = dr->svsk;
1641 dr->svsk = NULL;
Greg Banks1a68d952006-10-02 02:17:55 -07001642 spin_lock_bh(&svsk->sk_defer_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 list_add(&dr->handle.recent, &svsk->sk_deferred);
Greg Banks1a68d952006-10-02 02:17:55 -07001644 spin_unlock_bh(&svsk->sk_defer_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 set_bit(SK_DEFERRED, &svsk->sk_flags);
1646 svc_sock_enqueue(svsk);
1647 svc_sock_put(svsk);
1648}
1649
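/* Called through rqstp->rq_chandle.defer when a cache lookup needs to
 * block: copy enough of the request (head only; requests with page data
 * are not deferred) so it can be replayed later via svc_deferred_recv().
 */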
1650static struct cache_deferred_req *
1651svc_defer(struct cache_req *req)
1652{
1653 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
1654 int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
1655 struct svc_deferred_req *dr;
1656
1657 if (rqstp->rq_arg.page_len)
1658 return NULL; /* if more than a page, give up FIXME */
1659 if (rqstp->rq_deferred) {
1660 dr = rqstp->rq_deferred;
1661 rqstp->rq_deferred = NULL;
1662 } else {
1663 int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1664 /* FIXME maybe discard if size too large */
1665 dr = kmalloc(size, GFP_KERNEL);
1666 if (dr == NULL)
1667 return NULL;
1668
1669 dr->handle.owner = rqstp->rq_server;
1670 dr->prot = rqstp->rq_prot;
1671 dr->addr = rqstp->rq_addr;
J. Bruce Fields1918e342006-01-18 17:43:16 -08001672 dr->daddr = rqstp->rq_daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 dr->argslen = rqstp->rq_arg.len >> 2;
1674 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
1675 }
Greg Banksc45c3572006-10-02 02:17:54 -07001676 atomic_inc(&rqstp->rq_sock->sk_inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 dr->svsk = rqstp->rq_sock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 dr->handle.revisit = svc_revisit;
1680 return &dr->handle;
1681}
1682
1683/*
1684 * recv data from a deferred request into an active one
1685 */
1686static int svc_deferred_recv(struct svc_rqst *rqstp)
1687{
1688 struct svc_deferred_req *dr = rqstp->rq_deferred;
1689
1690 rqstp->rq_arg.head[0].iov_base = dr->args;
1691 rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
1692 rqstp->rq_arg.page_len = 0;
1693 rqstp->rq_arg.len = dr->argslen<<2;
1694 rqstp->rq_prot = dr->prot;
1695 rqstp->rq_addr = dr->addr;
J. Bruce Fields1918e342006-01-18 17:43:16 -08001696 rqstp->rq_daddr = dr->daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 return dr->argslen<<2;
1698}
1699
1700
1701static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1702{
1703 struct svc_deferred_req *dr = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
1705 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1706 return NULL;
Greg Banks1a68d952006-10-02 02:17:55 -07001707 spin_lock_bh(&svsk->sk_defer_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 clear_bit(SK_DEFERRED, &svsk->sk_flags);
1709 if (!list_empty(&svsk->sk_deferred)) {
1710 dr = list_entry(svsk->sk_deferred.next,
1711 struct svc_deferred_req,
1712 handle.recent);
1713 list_del_init(&dr->handle.recent);
1714 set_bit(SK_DEFERRED, &svsk->sk_flags);
1715 }
Greg Banks1a68d952006-10-02 02:17:55 -07001716 spin_unlock_bh(&svsk->sk_defer_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 return dr;
1718}