/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 4.4BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/highmem.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE		64

static struct hlist_head	*cache_hash;
static struct list_head		lru_head;
static struct kmem_cache	*drc_slab;
static unsigned int		num_drc_entries;
static unsigned int		max_drc_entries;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
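/*
 * Worked example (illustrative XID only): for xid 0xdeadbeef,
 * h = 0xdeadbeef ^ (0xdeadbeef >> 24) = 0xdeadbeef ^ 0xde = 0xdeadbe31,
 * and 0xdeadbe31 & (HASHSIZE - 1) selects bucket 0x31 (49). Folding the
 * top byte into the bottom one mixes the high-order bits into the
 * bucket index.
 */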

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);

/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
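/*
 * Worked example (a sketch, assuming 4k pages, i.e. PAGE_SHIFT == 12):
 * 1GB of low memory is 262144 pages, int_sqrt(262144) = 512, so
 * limit = (16 * 512) << (12 - 10) = 8192 << 2 = 32768, matching the
 * table above.
 */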

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp;
	struct hlist_node	*hn;
	struct hlist_head	*rh;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
	    vers = rqstp->rq_vers,
	    proc = rqstp->rq_proc;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
			return rp;
	}
	return NULL;
}
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to reuse the oldest expired entry on the LRU list or,
 * failing that, allocate a new one. Note that no operation performed
 * while holding cache_lock may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = nfsd_cache_search(rqstp);
	if (rp)
		goto found_entry;

	/* Try to use the first entry on the LRU */
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto setup_entry;
		}
	}

	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return RC_DOIT;
	}
	spin_lock(&cache_lock);
	++num_drc_entries;

	/*
	 * Must search again just in case someone inserted one
	 * after we dropped the lock above.
	 */
	found = nfsd_cache_search(rqstp);
	if (found) {
		nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? If so, prune one entry off the tip of the LRU in trade
	 * for the one we just allocated.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

setup_entry:
	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!rp)
		return;

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;
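	/*
	 * len is now the reply length in 32-bit XDR words, measured from
	 * statp to the end of the head iovec; the (256 >> 2) test below
	 * therefore caps cacheable replies at 256 bytes.
	 */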

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME: as the reply is in a page, we should just attach the page
 * and keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}