// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"

static LIST_HEAD(fscache_caches);
DECLARE_RWSEM(fscache_addremove_sem);
EXPORT_SYMBOL(fscache_addremove_sem);

static atomic_t fscache_cache_debug_id;

/*
 * Allocate a cache cookie.
 */
static struct fscache_cache *fscache_alloc_cache(const char *name)
{
	struct fscache_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache) {
		if (name) {
			cache->name = kstrdup(name, GFP_KERNEL);
			if (!cache->name) {
				kfree(cache);
				return NULL;
			}
		}
		refcount_set(&cache->ref, 1);
		INIT_LIST_HEAD(&cache->cache_link);
		cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
	}
	return cache;
}

static bool fscache_get_cache_maybe(struct fscache_cache *cache,
				    enum fscache_cache_trace where)
{
	bool success;
	int ref;

	success = __refcount_inc_not_zero(&cache->ref, &ref);
	if (success)
		trace_fscache_cache(cache->debug_id, ref + 1, where);
	return success;
}

/*
 * Look up a cache cookie.
 */
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
	struct fscache_cache *candidate, *cache, *unnamed = NULL;

	/* firstly check for the existence of the cache under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
		if (!cache->name && !name &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
	}

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_r;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the cache does not exist - create a candidate */
	candidate = fscache_alloc_cache(name);
	if (!candidate)
		return ERR_PTR(-ENOMEM);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_w;
		if (!cache->name) {
			unnamed = cache;
			if (!name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	if (unnamed && is_cache &&
	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
		goto use_unnamed_cache;

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	list_add_tail(&candidate->cache_link, &fscache_caches);
	trace_fscache_cache(candidate->debug_id,
			    refcount_read(&candidate->ref),
			    fscache_cache_new_acquire);
	up_write(&fscache_addremove_sem);
	return candidate;

got_cache_r:
	up_read(&fscache_addremove_sem);
	return cache;
use_unnamed_cache:
	cache = unnamed;
	cache->name = candidate->name;
	candidate->name = NULL;
got_cache_w:
	up_write(&fscache_addremove_sem);
	kfree(candidate->name);
	kfree(candidate);
	return cache;
}

/**
 * fscache_acquire_cache - Acquire a cache-level cookie.
 * @name: The name of the cache.
 *
 * Get a cookie to represent an actual cache. If a name is given and there is
 * a nameless cache record available, this will acquire that and set its name,
 * directing all the volumes using it to this cache.
 *
 * The cache will be switched over to the preparing state if not currently in
 * use, otherwise -EBUSY will be returned.
 */
struct fscache_cache *fscache_acquire_cache(const char *name)
{
	struct fscache_cache *cache;

	ASSERT(name);
	cache = fscache_lookup_cache(name, true);
	if (IS_ERR(cache))
		return cache;

	if (!fscache_set_cache_state_maybe(cache,
					   FSCACHE_CACHE_IS_NOT_PRESENT,
					   FSCACHE_CACHE_IS_PREPARING)) {
		pr_warn("Cache tag %s in use\n", name);
		fscache_put_cache(cache, fscache_cache_put_cache);
		return ERR_PTR(-EBUSY);
	}

	return cache;
}
EXPORT_SYMBOL(fscache_acquire_cache);
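
/*
 * Illustrative sketch: roughly how a cache backend might obtain its
 * cache-level cookie at bind time.  The mycache_bind() helper and the way it
 * is driven are hypothetical; the fscache_* calls are the ones defined in
 * this file.
 *
 *	static struct fscache_cache *mycache_bind(const char *tag)
 *	{
 *		struct fscache_cache *cache;
 *
 *		// Acquire (or create) the cache cookie; on success it is
 *		// left in the preparing state for this backend to set up.
 *		cache = fscache_acquire_cache(tag);
 *		if (IS_ERR(cache))
 *			return cache;	// e.g. -EBUSY if the tag is in use
 *
 *		// ... probe the backing filesystem here; on failure, undo
 *		// with fscache_relinquish_cache(cache) ...
 *		return cache;
 *	}
 */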

/**
 * fscache_put_cache - Release a cache-level cookie.
 * @cache: The cache cookie to be released
 * @where: An indication of where the release happened
 *
 * Release the caller's reference on a cache-level cookie. The @where
 * indication should give information about the circumstances in which the call
 * occurs and will be logged through a tracepoint.
 */
void fscache_put_cache(struct fscache_cache *cache,
		       enum fscache_cache_trace where)
{
	unsigned int debug_id;
	bool zero;
	int ref;

	if (IS_ERR_OR_NULL(cache))
		return;

	debug_id = cache->debug_id;
	zero = __refcount_dec_and_test(&cache->ref, &ref);
	trace_fscache_cache(debug_id, ref - 1, where);

	if (zero) {
		down_write(&fscache_addremove_sem);
		list_del_init(&cache->cache_link);
		up_write(&fscache_addremove_sem);
		kfree(cache->name);
		kfree(cache);
	}
}

/**
 * fscache_relinquish_cache - Reset cache state and release cookie
 * @cache: The cache cookie to be released
 *
 * Reset the state of a cache and release the caller's reference on a cache
 * cookie.
 */
void fscache_relinquish_cache(struct fscache_cache *cache)
{
	enum fscache_cache_trace where =
		(cache->state == FSCACHE_CACHE_IS_PREPARING) ?
		fscache_cache_put_prep_failed :
		fscache_cache_put_relinquish;

	cache->ops = NULL;
	cache->cache_priv = NULL;
	smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
	fscache_put_cache(cache, where);
}
EXPORT_SYMBOL(fscache_relinquish_cache);

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The cache-level cookie representing the cache
 * @ops: Table of cache operations to use
 * @cache_priv: Private data for the cache record
 *
 * Add a cache to the system, making it available for network filesystems to
 * use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      const struct fscache_cache_ops *ops,
		      void *cache_priv)
{
	int n_accesses;

	_enter("{%s,%s}", ops->name, cache->name);

	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

	/* Get a ref on the cache cookie and keep its n_accesses counter raised
	 * by 1 to prevent wakeups from transitioning it to 0 until we're
	 * withdrawing caching services from it.
	 */
	n_accesses = atomic_inc_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_pin);

	down_write(&fscache_addremove_sem);

	cache->ops = ops;
	cache->cache_priv = cache_priv;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);

	up_write(&fscache_addremove_sem);
	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
	_leave(" = 0 [%s]", cache->name);
	return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
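
/*
 * Illustrative sketch: once a backend has prepared its own structures, it
 * declares the cache live with fscache_add_cache().  The mycache_* names are
 * hypothetical and the ops table is trimmed to the one field this file
 * actually uses (->name); a real table also carries the backend's method
 * pointers.
 *
 *	static const struct fscache_cache_ops mycache_ops = {
 *		.name = "mycache",
 *		// ... lookup/withdrawal methods omitted in this sketch ...
 *	};
 *
 *	static int mycache_bring_online(struct fscache_cache *cache, void *priv)
 *	{
 *		// The cookie must still be in the preparing state set up by
 *		// fscache_acquire_cache().
 *		return fscache_add_cache(cache, &mycache_ops, priv);
 *	}
 */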

/**
 * fscache_begin_cache_access - Pin a cache so it can be accessed
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing it, returning true if successful. This works as follows:
 *
 * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *     then we return false to indicate access was not permitted.
 *
 * (2) If the cache tests as live, then we increment the n_accesses count and
 *     then recheck the liveness, ending the access if it ceased to be live.
 *
 * (3) When we end the access, we decrement n_accesses and wake up any
 *     waiters if it reaches 0.
 *
 * (4) Whilst the cache is caching, n_accesses is kept artificially
 *     incremented to prevent wakeups from happening.
 *
 * (5) When the cache is taken offline, the state is changed to prevent new
 *     accesses, n_accesses is decremented and we wait for n_accesses to
 *     become 0.
 *
 * A sketch of this begin/end bracket follows fscache_end_cache_access()
 * below.
 */
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	if (!fscache_cache_is_live(cache))
		return false;

	n_accesses = atomic_inc_return(&cache->n_accesses);
	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (!fscache_cache_is_live(cache)) {
		fscache_end_cache_access(cache, fscache_access_unlive);
		return false;
	}
	return true;
}

/**
 * fscache_end_cache_access - Unpin a cache at the end of an access.
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache after we've accessed it. The @why indicator is merely
 * provided for tracing purposes.
 */
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (n_accesses == 0)
		wake_up_var(&cache->n_accesses);
}
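
/*
 * Illustrative sketch: how a caller brackets an operation with the access pin
 * described above.  The trace reason fscache_access_io_read is assumed for
 * illustration; any enum fscache_access_trace value describing the
 * circumstances may be used.
 *
 *	if (!fscache_begin_cache_access(cache, fscache_access_io_read))
 *		return -ENOBUFS;	// cache not live; don't touch it
 *
 *	// ... perform the cache operation; the cache cannot complete
 *	// withdrawal while n_accesses is held raised ...
 *
 *	fscache_end_cache_access(cache, fscache_access_io_read);
 */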

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything. This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	if (fscache_set_cache_state_maybe(cache,
					  FSCACHE_CACHE_IS_ACTIVE,
					  FSCACHE_CACHE_GOT_IOERROR))
		pr_err("Cache '%s' stopped due to I/O error\n",
		       cache->name);
}
EXPORT_SYMBOL(fscache_io_error);
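
/*
 * Illustrative sketch: a backend's I/O completion path might report a fatal
 * error like this (the mycache_write_done() helper is hypothetical):
 *
 *	static void mycache_write_done(struct fscache_cache *cache, int error)
 *	{
 *		// A fatal error stops the cache: fscache will not begin new
 *		// accesses against it after this.
 *		if (error)
 *			fscache_io_error(cache);
 *	}
 */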

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The cache cookie
 *
 * Begin the process of withdrawing a cache from service. This stops new
 * cache-level and volume-level accesses from taking place and waits for
 * currently ongoing cache-level accesses to end.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	int n_accesses;

	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
		  cache->name, atomic_read(&cache->object_count));

	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);

	/* Allow wakeups on dec-to-0 */
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_unpin);

	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_cache);
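
/*
 * Illustrative sketch: teardown order when a backend is unbound.  The
 * mycache_unbind() helper is hypothetical; the fscache_* calls are the ones
 * defined in this file.
 *
 *	static void mycache_unbind(struct fscache_cache *cache)
 *	{
 *		// Stop new cache-level accesses and wait for in-flight ones.
 *		fscache_withdraw_cache(cache);
 *
 *		// ... withdraw volumes and cookies, flush backend state ...
 *
 *		// Reset the cookie state and drop the backend's reference.
 *		fscache_relinquish_cache(cache);
 *	}
 */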

#ifdef CONFIG_PROC_FS
static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";

/*
 * Generate a list of caches in /proc/fs/fscache/caches
 */
static int fscache_caches_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cache *cache;

	if (v == &fscache_caches) {
		seq_puts(m,
			 "CACHE    REF   VOLS  OBJS  ACCES S NAME\n"
			 "======== ===== ===== ===== ===== = ===============\n"
			 );
		return 0;
	}

	cache = list_entry(v, struct fscache_cache, cache_link);
	seq_printf(m,
		   "%08x %5d %5d %5d %5d %c %s\n",
		   cache->debug_id,
		   refcount_read(&cache->ref),
		   atomic_read(&cache->n_volumes),
		   atomic_read(&cache->object_count),
		   atomic_read(&cache->n_accesses),
		   fscache_cache_states[cache->state],
		   cache->name ?: "-");
	return 0;
}

static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_addremove_sem)
{
	down_read(&fscache_addremove_sem);
	return seq_list_start_head(&fscache_caches, *_pos);
}

static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_caches, _pos);
}

static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
{
	up_read(&fscache_addremove_sem);
}

const struct seq_operations fscache_caches_seq_ops = {
	.start	= fscache_caches_seq_start,
	.next	= fscache_caches_seq_next,
	.stop	= fscache_caches_seq_stop,
	.show	= fscache_caches_seq_show,
};
#endif /* CONFIG_PROC_FS */