// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:	8192
 * 128MB:	11585
 * 256MB:	16384
 * 512MB:	23170
 *   1GB:	32768
 *   2GB:	46340
 *   4GB:	65536
 *   8GB:	92681
 *  16GB:	131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough maximum of the amount of
 * memory used in KB.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
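/*
 * Worked example, assuming 4KB pages (PAGE_SHIFT == 12): with 1GB of low
 * memory, low_pages == 262144, int_sqrt(262144) == 512, and
 * (16 * 512) << (12 - 10) == 32768, matching the 1GB row in the table
 * above.
 */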
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

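/*
 * All retransmissions of a given request carry the same XID, so hashing
 * on the XID alone guarantees that every retry of a call lands in the
 * same bucket as the original.
 */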
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
	return hash_32(be32_to_cpu(xid), nn->maskbits);
}

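/*
 * Allocate a new entry and fill in its lookup key from the request:
 * XID, procedure number, client address and port, transport protocol,
 * program version, argument length, and payload checksum.
 */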
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
			struct nfsd_net *nn)
{
	struct svc_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

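/*
 * Release an entry: any cached reply data is freed, and if the entry was
 * ever inserted it is unlinked from the bucket's rbtree and LRU list and
 * subtracted from the stats. The caller must hold the bucket's cache_lock.
 */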
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
				struct nfsd_net *nn)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
		kfree(rp->c_replvec.iov_base);
	}
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
	}
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
			struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(b, rp, nn);
	spin_unlock(&b->cache_lock);
}

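/*
 * The entry slab is shared by all network namespaces, so it is set up
 * and torn down with the nfsd module rather than per-net.
 */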
int nfsd_drc_slab_create(void)
{
	drc_slab = kmem_cache_create("nfsd_drc",
				sizeof(struct svc_cacherep), 0, 0, NULL);
	return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}

static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
{
	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
{
	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

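/*
 * Set up the per-net cache: size it from available low memory, register
 * the shrinker, and allocate the bucket array. The error paths unwind in
 * reverse order of setup.
 */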
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	status = nfsd_reply_cache_stats_init(nn);
	if (status)
		goto out_nomem;

	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.seeks = 1;
	status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
	if (status)
		goto out_stats_destroy;

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		goto out_shrinker;

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_stats_destroy:
	nfsd_reply_cache_stats_destroy(nn);
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

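/*
 * Tear down the per-net cache: release the stats counters and the
 * shrinker, free every remaining entry, then free the bucket array.
 */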
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct svc_cacherep *rp;
	unsigned int i;

	nfsd_reply_cache_stats_destroy(nn);
	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
							rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of the LRU list and refresh its
 * timestamp, so that pruning starts with the coldest entries.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

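/*
 * Free expired or excess entries from one bucket, walking from the head
 * of the LRU list (coldest entries first). The caller must hold the
 * bucket's cache_lock.
 */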
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(b, rp, nn);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b, nn);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

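/*
 * Shrinker callbacks: ->count_objects reports how many entries the cache
 * currently holds, and ->scan_objects prunes expired and excess entries
 * when the VM asks us to give back memory.
 */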
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

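/*
 * Compare two cache keys. As a side effect, when the XIDs match but the
 * payload checksums differ (likely a different request reusing an XID),
 * count a payload miss and emit a trace event.
 */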
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
			const struct svc_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		nfsd_stats_payload_misses_inc(nn);
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the bucket's rbtree for an entry that matches the given key.
 * Must be called with cache_lock held. Returns the matching entry if one
 * is found; otherwise inserts the given key into the tree and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
			struct nfsd_net *nn)
{
	struct svc_cacherep *rp, *ret = key;
	struct rb_node **p = &b->rb_head.rb_node,
			*parent = NULL;
	unsigned int entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct svc_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a miss followed by an insert, a new entry is preallocated
 * and inserted under the bucket lock; if a matching entry was already
 * present, the preallocated one is freed and the existing entry is used.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep *rp, *found;
	__be32 xid = rqstp->rq_xid;
	__wsum csum;
	u32 hash = nfsd_cache_hash(xid, nn);
	struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsd_stats_rc_nocache_inc();
		goto out;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp) {
		nfsd_reply_cache_free_locked(NULL, rp, nn);
		rp = found;
		goto found_entry;
	}

	nfsd_stats_rc_misses_inc();
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;

	atomic_inc(&nn->num_drc_entries);
	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));

	/* go ahead and prune the cache */
	prune_bucket(b, nn);

out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsd_stats_rc_hits_inc();
	rtn = RC_DROPIT;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
	goto out_unlock;
}

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	u32 hash;
	struct nfsd_drc_bucket *b;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
	b = &nn->drc_hashtbl[hash];

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nfsd_stats_drc_mem_usage_add(nn, bufsize);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec *vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = m->private;

	seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries: %u\n",
			atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage: %lld\n",
			percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
	seq_printf(m, "cache hits: %lld\n",
			percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
	seq_printf(m, "cache misses: %lld\n",
			percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
	seq_printf(m, "not cached: %lld\n",
			percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
	seq_printf(m, "payload misses: %lld\n",
			percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
	seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
					nfsd_net_id);

	return single_open(file, nfsd_reply_cache_stats_show, nn);
}