/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
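
/*
 * Illustrative usage (a sketch, not taken from this file): before
 * modifying a page that may still be en route to the cache, a netfs
 * would typically do something like
 *
 *      if (fscache_check_page_write(cookie, page))
 *              fscache_wait_on_page_write(cookie, page);
 *
 * where the fscache_*() wrappers in include/linux/fscache.h reduce to
 * the __fscache_*() functions above when the cookie is valid.
 */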

/*
 * wait for a page to finish being written to the cache.  Put a timeout here
 * since we might be called recursively via the parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
                                  HZ);
}
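
/*
 * Note that wait_event_timeout() returns 0 if the timeout elapsed with
 * the condition still false, so a false return from
 * release_page_wait_timeout() means we gave up after HZ jiffies (about
 * a second) with the store still outstanding, not that it completed.
 */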

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

try_again:
        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* We will wait here if we're allowed to, but that could deadlock the
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }

        fscache_stat(&fscache_n_store_vmscan_wait);
        if (!release_page_wait_timeout(cookie, page))
                _debug("fscache writeout timeout page: %p{%lx}",
                       page, page->index);

        gfp &= ~__GFP_WAIT;
        goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
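
/*
 * Sketch of a caller (assumed shape, based on how netfs releasepage
 * hooks use this; example_cookie_of() is hypothetical - real
 * filesystems derive the cookie from their inode):
 *
 *      static int example_releasepage(struct page *page, gfp_t gfp)
 *      {
 *              struct fscache_cookie *cookie = example_cookie_of(page);
 *
 *              if (PageFsCache(page) &&
 *                  !fscache_maybe_release_page(cookie, page, gfp))
 *                      return 0;       (cannot release yet)
 *              return 1;
 *      }
 */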

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}
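
/*
 * For reference, a page queued for storage moves through the stores
 * radix tree like this (see __fscache_write_page and fscache_write_op
 * below):
 *
 *      inserted, PENDING tag set       - store requested, not yet started
 *      STORING set, PENDING cleared    - write to the cache in progress
 *      deleted from the tree           - write finished or cancelled
 *
 * Bit 0 of cookie->flags serves as the wait address for all of these
 * transitions, hence the bit_waitqueue()/wake_up_bit() pairing used
 * throughout this file.
 */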

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op, true);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        bool wake_cookie = false;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_EXCLUSIVE) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs_dec;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
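
/*
 * Illustrative only: a netfs calls the fscache_attr_changed() wrapper
 * round this function after an operation such as truncate alters the
 * attributes the cache object's size depends on, e.g.
 *
 *      fscache_attr_changed(example_inode_cookie(inode));
 *
 * example_inode_cookie() is a stand-in for however the filesystem
 * stores its cookie; a -ENOBUFS return is advisory and just means the
 * cache could not honour the notification.
 */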

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(atomic_read(&op->n_pages), ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct fscache_cookie *cookie,
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD |
                (1UL << FSCACHE_OP_WAITING) |
                (1UL << FSCACHE_OP_UNUSE_COOKIE);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        atomic_set(&op->n_pages, 0);
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
                                          struct fscache_operation *op,
                                          atomic_t *stat_op_waits,
                                          atomic_t *stat_object_dead,
                                          void (*do_cancel)(struct fscache_operation *))
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
                goto check_if_dead;

        _debug(">>> WT");
        if (stat_op_waits)
                fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(op, do_cancel);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                            TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->state == FSCACHE_OP_ST_CANCELLED) {
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
                fscache_cancel_op(op, do_cancel);
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   -ENODATA - no data available in the backing object for this block
 *   0 - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, page->mapping,
                                     end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        atomic_set(&op->n_pages, 1);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

        __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead),
                fscache_do_cancel_retrieval);
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
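
/*
 * Sketch of how a netfs ->readpage() might consume the return codes
 * documented above (assumed shape; example_end_io() and
 * example_read_from_server() are hypothetical):
 *
 *      ret = fscache_read_or_alloc_page(cookie, page, example_end_io,
 *                                       ctx, GFP_KERNEL);
 *      switch (ret) {
 *      case 0:                 (read dispatched; end_io completes the page)
 *              return 0;
 *      case -ENOBUFS:          (no cache space available)
 *      case -ENODATA:          (block allocated but holds no data yet)
 *              return example_read_from_server(page);
 *      default:
 *              return ret;
 *      }
 */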

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS - no backing object or space available in which to cache any
 *     pages not being read
 *   -ENODATA - no data available in the backing object for some or all of
 *     the pages
 *   0 - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        atomic_set(&op->n_pages, *nr_pages);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead),
                fscache_do_cancel_retrieval);
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
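
/*
 * Note for callers: pages the cache takes on are removed from the list
 * and subtracted from *nr_pages, so whatever remains afterwards still
 * needs fetching from the server.  Illustrative shape only
 * (example_readpages_from_server() is hypothetical):
 *
 *      ret = fscache_read_or_alloc_pages(cookie, mapping, pages,
 *                                        &nr_pages, example_end_io,
 *                                        ctx, GFP_KERNEL);
 *      if (ret < 0 || nr_pages > 0)
 *              ret = example_readpages_from_server(mapping, pages,
 *                                                  nr_pages);
 */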

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   0 - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        atomic_set(&op->n_pages, 1);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead),
                fscache_do_cancel_retrieval);
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
                                struct list_head *pages)
{
        struct page *page;

        list_for_each_entry(page, pages, lru) {
                if (PageFsCache(page))
                        __fscache_uncache_page(cookie, page);
        }
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object)) {
                /* If we get here, then the on-disk cache object likely no
                 * longer exists, so we should just cancel this write
                 * operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [inactive]");
                return;
        }

        if (!cookie) {
                /* If we get here, then the cookie belonging to the object was
                 * detached, probably by the cookie being withdrawn due to
                 * memory pressure, which means that the pages from which we
                 * might write to the cache no longer exist - therefore, we
                 * can just cancel this write operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
                       _op->flags, _op->state, object->state->short_name,
                       object->flags);
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op, true);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op, true);
        _leave("");
}
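
/*
 * Design note: FSCACHE_OBJECT_PENDING_WRITE ensures only one instance
 * of fscache_write_op() is queued per object at a time, and each
 * invocation writes a single page, re-enqueueing itself via
 * fscache_enqueue_operation() until the gang lookup finds no more
 * PENDING pages - at which point the op declares itself superseded.
 */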

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        for (;;) {
                spin_lock(&cookie->stores_lock);
                n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                               ARRAY_SIZE(results),
                                               FSCACHE_COOKIE_PENDING_TAG);
                if (n == 0) {
                        spin_unlock(&cookie->stores_lock);
                        break;
                }

                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);

                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0 - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        wake_cookie = __fscache_unuse_cookie(cookie);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
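
/*
 * Illustrative caller (a sketch of the usual pattern): if the cache
 * refuses the copy, the netfs should drop the PG_fscache mark rather
 * than leave a store permanently pending:
 *
 *      if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *              fscache_uncache_page(cookie, page);
 */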

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        pr_warn("Cookie type %s marked page %lx multiple times\n",
                                cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
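
/*
 * Illustrative only: a netfs would typically call the wrapper for this
 * from its ->evict_inode() before relinquishing the cookie, e.g.
 *
 *      fscache_uncache_all_inode_pages(cookie, inode);
 *      fscache_relinquish_cookie(cookie, 0);
 *
 * so that no page is still marked PG_fscache once the cookie is gone.
 */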