// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};
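
/*
 * Each bucket pairs an rbtree (for keyed lookup) with an LRU list (for
 * aging) under its own cache_lock, so contention is spread across
 * buckets rather than funneled through a single global lock.
 */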

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
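
/*
 * Worked example of the sizing math above: with 1GB of low memory and
 * 4KB pages, low_pages = 262144, int_sqrt(262144) = 512, and
 * PAGE_SHIFT - 10 = 2, so limit = (16 * 512) << 2 = 32768 -- which is
 * the 1GB row in the table.
 */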

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
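
/*
 * Continuing the 1GB example: a limit of 32768 entries gives
 * 32768 / 64 = 512 buckets (already a power of two), so the maskbits
 * computed at init time ends up as ilog2(512) = 9.
 */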

static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
	return hash_32(be32_to_cpu(xid), nn->maskbits);
}
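
/*
 * Only the XID picks the bucket; the rest of the key (address, port,
 * protocol, procedure, version, length, checksum) is compared by
 * nfsd_cache_key_cmp() while walking the bucket's rbtree.
 */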

static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
			struct nfsd_net *nn)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
				struct nfsd_net *nn)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		nn->drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nn->drc_mem_usage -= sizeof(*rp);
	}
	kmem_cache_free(nn->drc_slab, rp);
}
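
/*
 * Callers of nfsd_reply_cache_free_locked() must hold b->cache_lock,
 * except when the entry was never inserted (c_state == RC_UNUSED, in
 * which case b may even be NULL) or when nothing else can reach the
 * cache, as during shutdown.
 */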

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
			struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(b, rp, nn);
	spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.seeks = 1;
	status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
	if (status)
		goto out_nomem;

	nn->drc_slab = kmem_cache_create("nfsd_drc",
				sizeof(struct svc_cacherep), 0, 0, NULL);
	if (!nn->drc_slab)
		goto out_shrinker;

	nn->drc_hashtbl = kcalloc(hashsize,
				sizeof(*nn->drc_hashtbl), GFP_KERNEL);
	if (!nn->drc_hashtbl) {
		nn->drc_hashtbl = vzalloc(array_size(hashsize,
						sizeof(*nn->drc_hashtbl)));
		if (!nn->drc_hashtbl)
			goto out_slab;
	}

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_slab:
	kmem_cache_destroy(nn->drc_slab);
out_shrinker:
	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
							rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;

	kmem_cache_destroy(nn->drc_slab);
	nn->drc_slab = NULL;
}

/*
 * Move a cache entry to the tail of the LRU list; entries age off the
 * head via prune_bucket() and the shrinker.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(b, rp, nn);
		freed++;
	}
	return freed;
}
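
/*
 * prune_bucket() expects b->cache_lock to be held; both of its callers,
 * prune_cache_entries() and nfsd_cache_lookup(), take the lock first.
 */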

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b, nn);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
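
/*
 * Checksumming only the first RC_CSUMLEN bytes bounds the cost of
 * hashing large WRITE payloads while still distinguishing distinct
 * requests that happen to reuse an XID.
 */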

static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
			const struct svc_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		++nn->payload_misses;
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
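
/*
 * The memcmp() above relies on nfsd_reply_cache_alloc() zeroing the
 * whole c_key before filling it in, so padding bytes can never make
 * two otherwise-equal keys compare unequal.
 */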

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry, or
 * links @key into the tree and returns it when no match exists.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
			struct nfsd_net *nn)
{
	struct svc_cacherep	*rp, *ret = key;
	struct rb_node		**p = &b->rb_head.rb_node,
				*parent = NULL;
	unsigned int		entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct svc_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}
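
/*
 * Note the insert-or-lookup semantics: when no node compares equal,
 * @key itself is linked into the tree and returned, so a caller
 * detects a miss by checking whether the return value is @key.
 */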

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a miss followed by an insert, we preallocate an
 * entry before taking the bucket lock, then search the bucket; if
 * another thread inserted a matching entry in the meantime, the
 * preallocated one is freed and the existing entry is used instead.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid, nn);
	struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		goto out;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp) {
		nfsd_reply_cache_free_locked(NULL, rp, nn);
		rp = found;
		goto found_entry;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;

	atomic_inc(&nn->num_drc_entries);
	nn->drc_mem_usage += sizeof(*rp);

	/* go ahead and prune the cache */
	prune_bucket(b, nn);

out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsdstats.rchits++;
	rtn = RC_DROPIT;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(b, rp, nn);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
	goto out_unlock;
}
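
/*
 * Entry lifecycle: nfsd_cache_lookup() above marks a fresh entry
 * RC_INPROG, and nfsd_cache_update() below moves it to RC_DONE once
 * the reply has been cached; duplicates arriving in between are
 * dropped (RC_DROPIT).
 */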

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
	b = &nn->drc_hashtbl[hash];

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nn->drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = m->private;

	seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries: %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", nn->payload_misses);
	seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
					  nfsd_net_id);

	return single_open(file, nfsd_reply_cache_stats_show, nn);
}