// SPDX-License-Identifier: GPL-2.0-or-later
/* Cache data I/O routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/fscache-cache.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/slab.h>
#include "internal.h"

/**
 * fscache_wait_for_operation - Wait for an object to become accessible
 * @cres: The cache resources for the operation being performed
 * @want_state: The minimum state the object must be at
 *
 * See if the target cache object is at the specified minimum state of
 * accessibility yet, and if not, wait for it.
 *
 * Returns true if the object is ready for I/O and false if the cache is not
 * usable.
 */
bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	enum fscache_cookie_state state;

again:
	if (!fscache_cache_is_live(cookie->volume->cache)) {
		_leave(" [broken]");
		return false;
	}

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	switch (state) {
	case FSCACHE_COOKIE_STATE_CREATING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		fallthrough;
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		wait_var_event(&cookie->state,
			       fscache_cookie_state(cookie) != state);
		goto again;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		goto ready;
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
	default:
		_leave(" [not live]");
		return false;
	}

ready:
	if (!cres->cache_priv2)
		return cookie->volume->cache->ops->begin_operation(cres, want_state);
	return true;
}
EXPORT_SYMBOL(fscache_wait_for_operation);
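
/*
 * Illustrative sketch (not part of this file): a caller already holding
 * cache resources might check that the object is ready before issuing I/O,
 * for example:
 *
 *	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
 *		return -ENOBUFS; // fall back to fetching from the server
 *
 * FSCACHE_WANT_READ is one of the enum fscache_want_state levels declared in
 * linux/fscache-cache.h.
 */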

/*
 * Begin an I/O operation on the cache, waiting till we reach the right state.
 *
 * Attaches the resources required to the operation resources record.
 */
static int fscache_begin_operation(struct netfs_cache_resources *cres,
				   struct fscache_cookie *cookie,
				   enum fscache_want_state want_state,
				   enum fscache_access_trace why)
{
	enum fscache_cookie_state state;
	long timeo;
	bool once_only = false;

	cres->ops = NULL;
	cres->cache_priv = cookie;
	cres->cache_priv2 = NULL;
	cres->debug_id = cookie->debug_id;
	cres->inval_counter = cookie->inval_counter;

	if (!fscache_begin_cookie_access(cookie, why))
		return -ENOBUFS;

again:
	spin_lock(&cookie->lock);

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	switch (state) {
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_CREATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_ACTIVE:
		goto ready;
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		WARN(1, "Can't use cookie in state %u\n", cookie->state);
		goto not_live;
	default:
		goto not_live;
	}

ready:
	spin_unlock(&cookie->lock);
	if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
		goto failed;
	return 0;

wait_for_file_wrangling:
	spin_unlock(&cookie->lock);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     atomic_read(&cookie->n_accesses),
			     fscache_access_io_wait);
	timeo = wait_var_event_timeout(&cookie->state,
				       fscache_cookie_state(cookie) != state, 20 * HZ);
	if (timeo <= 1 && !once_only) {
		pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u\n",
			__func__, fscache_cookie_state(cookie), state);
		fscache_print_cookie(cookie, 'O');
		once_only = true;
	}
	goto again;

not_live:
	spin_unlock(&cookie->lock);
failed:
	cres->cache_priv = NULL;
	cres->ops = NULL;
	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

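/*
 * Begin a read operation on the cache, attaching the resources needed to do
 * so to @cres.  Returns 0 on success or -ENOBUFS if the cache isn't
 * available.
 */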
int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
				   struct fscache_cookie *cookie)
{
	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
				       fscache_access_io_read);
}
EXPORT_SYMBOL(__fscache_begin_read_operation);
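
/*
 * Illustrative sketch (not part of this file, and assuming the
 * fscache_begin_read_operation() and fscache_read() wrappers declared in
 * linux/fscache.h): a network filesystem's read path might do something
 * like:
 *
 *	struct netfs_cache_resources cres = {};
 *
 *	if (fscache_begin_read_operation(&cres, cookie) == 0)
 *		fscache_read(&cres, pos, &iter, NETFS_READ_HOLE_IGNORE,
 *			     myfs_read_done, myfs_req);
 *
 * where myfs_read_done() is a hypothetical netfs_io_terminated_t callback,
 * and the resources are released with fscache_end_operation() once the read
 * has completed.
 */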
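
/*
 * Begin a write operation on the cache, attaching the resources needed to do
 * so to @cres.  Returns 0 on success or -ENOBUFS if the cache isn't
 * available.
 */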
int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
				    struct fscache_cookie *cookie)
{
	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
				       fscache_access_io_write);
}
EXPORT_SYMBOL(__fscache_begin_write_operation);

/**
 * fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback
 * @page: The page being dirtied
 * @cookie: The cookie referring to the cache object
 *
 * Set the dirty flag on a page and pin an in-use cache object in memory so
 * that writeback can later write to it.  This is intended to be called from
 * the filesystem's ->set_page_dirty() method.
 *
 * Returns 1 if PG_dirty was set on the page, 0 otherwise.
 */
int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie)
{
	struct inode *inode = page->mapping->host;
	bool need_use = false;

	_enter("");

	if (!__set_page_dirty_nobuffers(page))
		return 0;
	if (!fscache_cookie_valid(cookie))
		return 1;

	if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
			inode->i_state |= I_PINNING_FSCACHE_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return 1;
}
EXPORT_SYMBOL(fscache_set_page_dirty);
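
/*
 * Illustrative sketch (not part of this file): a network filesystem would
 * typically wire this into its address_space_operations, for example:
 *
 *	static int myfs_set_page_dirty(struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return fscache_set_page_dirty(page, myfs_i_cookie(inode));
 *	}
 *
 * where myfs_i_cookie() is a hypothetical helper that returns the fscache
 * cookie attached to the inode that owns @page.
 */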
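
/*
 * Record of a write to the cache that is in flight.  The completion handler
 * uses it to find the pages whose PG_fscache bits need clearing and to call
 * the netfs's termination function, if any.
 */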
struct fscache_write_request {
	struct netfs_cache_resources cache_resources;
	struct address_space *mapping;
	loff_t start;
	size_t len;
	bool set_bits;
	netfs_io_terminated_t term_func;
	void *term_func_priv;
};

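/*
 * Clear the PG_fscache (PG_private_2) bit on each page in the given range,
 * waking anyone waiting on it, once the data has been copied to the cache or
 * the copy has been abandoned.
 */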
void __fscache_clear_page_bits(struct address_space *mapping,
			       loff_t start, size_t len)
{
	pgoff_t first = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE;
	struct page *page;

	if (len) {
		XA_STATE(xas, &mapping->i_pages, first);

		rcu_read_lock();
		xas_for_each(&xas, page, last) {
			end_page_fscache(page);
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__fscache_clear_page_bits);

/*
 * Deal with the completion of writing the data to the cache.
 */
static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
			      bool was_async)
{
	struct fscache_write_request *wreq = priv;

	fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
				wreq->mapping, wreq->start, wreq->len,
				wreq->set_bits);

	if (wreq->term_func)
		wreq->term_func(wreq->term_func_priv, transferred_or_error,
				was_async);
	fscache_end_operation(&wreq->cache_resources);
	kfree(wreq);
}

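/*
 * Start a write of the pages in the given pagecache range to the cache.
 * Cache resources are begun on @cookie, the write is prepared and the data
 * is then written asynchronously, with fscache_wreq_done() cleaning up on
 * completion.  On any failure the page bits are cleared and @term_func, if
 * given, is called with the error.
 */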
void __fscache_write_to_cache(struct fscache_cookie *cookie,
			      struct address_space *mapping,
			      loff_t start, size_t len, loff_t i_size,
			      netfs_io_terminated_t term_func,
			      void *term_func_priv,
			      bool cond)
{
	struct fscache_write_request *wreq;
	struct netfs_cache_resources *cres;
	struct iov_iter iter;
	int ret = -ENOBUFS;

	if (len == 0)
		goto abandon;

	_enter("%llx,%zx", start, len);

	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
	if (!wreq)
		goto abandon;
	wreq->mapping = mapping;
	wreq->start = start;
	wreq->len = len;
	wreq->set_bits = cond;
	wreq->term_func = term_func;
	wreq->term_func_priv = term_func_priv;

	cres = &wreq->cache_resources;
	if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_write) < 0)
		goto abandon_free;

	ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
	if (ret < 0)
		goto abandon_end;

	/* TODO: Consider clearing page bits now for space the write isn't
	 * covering.  This is more complicated than it appears when THPs are
	 * taken into account.
	 */

	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
	return;

abandon_end:
	return fscache_wreq_done(wreq, ret, false);
abandon_free:
	kfree(wreq);
abandon:
	fscache_clear_page_bits(cookie, mapping, start, len, cond);
	if (term_func)
		term_func(term_func_priv, ret, false);
}
EXPORT_SYMBOL(__fscache_write_to_cache);
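
/*
 * Illustrative sketch (not part of this file, and assuming the
 * fscache_write_to_cache() wrapper declared in linux/fscache.h): once
 * writeback to the server has been started, a netfs might copy the same
 * region to the cache with something like:
 *
 *	fscache_write_to_cache(cookie, inode->i_mapping, start, len,
 *			       i_size_read(inode), myfs_write_done,
 *			       myfs_req, caching);
 *
 * where myfs_write_done() is a hypothetical netfs_io_terminated_t callback
 * and caching says whether PG_fscache was set on the pages in the range.
 */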

/*
 * Change the size of a backing object.
 */
void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
	struct netfs_cache_resources cres;

	trace_fscache_resize(cookie, new_size);
	if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_resize) == 0) {
		fscache_stat(&fscache_n_resizes);
		set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);

		/* We cannot defer a resize as we need to do it inside the
		 * netfs's inode lock so that we're serialised with respect to
		 * writes.
		 */
		cookie->volume->cache->ops->resize_cookie(&cres, new_size);
		fscache_end_operation(&cres);
	} else {
		fscache_stat(&fscache_n_resizes_null);
	}
}
EXPORT_SYMBOL(__fscache_resize_cookie);