/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include "super.h"
#include "cache.h"

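/*
 * Auxiliary data stored with each inode cookie.  fscache hands it back to
 * ceph_fscache_inode_check_aux() so a cached object can be declared
 * obsolete once the inode's version or mtime has moved on.
 */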
struct ceph_aux_inode {
        u64     version;
        u64     mtime_sec;
        u64     mtime_nsec;
};

struct fscache_netfs ceph_cache_netfs = {
        .name           = "ceph",
        .version        = 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

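/*
 * One entry per filesystem registered with fscache, used to catch attempts
 * to register the same fsid (and uniquifier) twice.
 */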
struct ceph_fscache_entry {
        struct list_head list;
        struct fscache_cookie *fscache;
        size_t uniq_len;
        /* The following members must be last */
        struct ceph_fsid fsid;
        char uniquifier[0];
};

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
        .name           = "CEPH.fsid",
        .type           = FSCACHE_COOKIE_TYPE_INDEX,
};

int __init ceph_fscache_register(void)
{
        return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
        fscache_unregister_netfs(&ceph_cache_netfs);
}

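/*
 * Acquire a per-filesystem index cookie keyed on the cluster fsid plus the
 * optional "fsc=<uniquifier>" mount option.  Without a uniquifier, a second
 * registration of the same fsid is refused with -EBUSY, since both mounts
 * would otherwise map to the same cache object.
 *
 * Illustrative usage only (exact option syntax may differ by kernel and
 * mount version):
 *   mount -t ceph mon1:/ /mnt/a -o fsc
 *   mount -t ceph mon1:/ /mnt/b -o fsc=b
 */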
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
        const struct ceph_fsid *fsid = &fsc->client->fsid;
        const char *fscache_uniq = fsc->mount_options->fscache_uniq;
        size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
        struct ceph_fscache_entry *ent;
        int err = 0;

        mutex_lock(&ceph_fscache_lock);
        list_for_each_entry(ent, &ceph_fscache_list, list) {
                if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
                        continue;
                if (ent->uniq_len != uniq_len)
                        continue;
                if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
                        continue;

                pr_err("fscache cookie already registered for fsid %pU\n", fsid);
                pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
                err = -EBUSY;
                goto out_unlock;
        }

        ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
        if (!ent) {
                err = -ENOMEM;
                goto out_unlock;
        }

        memcpy(&ent->fsid, fsid, sizeof(*fsid));
        if (uniq_len > 0) {
                memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
                ent->uniq_len = uniq_len;
        }

        fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                              &ceph_fscache_fsid_object_def,
                                              &ent->fsid, sizeof(ent->fsid) + uniq_len,
                                              NULL, 0,
                                              fsc, 0, true);

        if (fsc->fscache) {
                ent->fscache = fsc->fscache;
                list_add_tail(&ent->list, &ceph_fscache_list);
        } else {
                kfree(ent);
                pr_err("unable to register fscache cookie for fsid %pU\n",
                       fsid);
                /* all other fs ignore this error */
        }
out_unlock:
        mutex_unlock(&ceph_fscache_lock);
        return err;
}

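/*
 * Called back by fscache with the auxiliary data it stored for an inode.
 * If the recorded version/mtime no longer match the in-core inode, or the
 * cached object size disagrees with i_size, the cached data is obsolete.
 */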
static enum fscache_checkaux ceph_fscache_inode_check_aux(
        void *cookie_netfs_data, const void *data, uint16_t dlen,
        loff_t object_size)
{
        struct ceph_aux_inode aux;
        struct ceph_inode_info* ci = cookie_netfs_data;
        struct inode* inode = &ci->vfs_inode;

        if (dlen != sizeof(aux) ||
            i_size_read(inode) != object_size)
                return FSCACHE_CHECKAUX_OBSOLETE;

        memset(&aux, 0, sizeof(aux));
        aux.version = ci->i_version;
        aux.mtime_sec = inode->i_mtime.tv_sec;
        aux.mtime_nsec = inode->i_mtime.tv_nsec;

        if (memcmp(data, &aux, sizeof(aux)) != 0)
                return FSCACHE_CHECKAUX_OBSOLETE;

        dout("ceph inode 0x%p cached okay\n", ci);
        return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
        .name           = "CEPH.inode",
        .type           = FSCACHE_COOKIE_TYPE_DATAFILE,
        .check_aux      = ceph_fscache_inode_check_aux,
};

void ceph_fscache_register_inode_cookie(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_aux_inode aux;

        /* No caching for this filesystem */
        if (!fsc->fscache)
                return;

        /* Only cache regular files that are opened read-only */
        if (!S_ISREG(inode->i_mode))
                return;

        inode_lock_nested(inode, I_MUTEX_CHILD);
        if (!ci->fscache) {
                memset(&aux, 0, sizeof(aux));
                aux.version = ci->i_version;
                aux.mtime_sec = inode->i_mtime.tv_sec;
                aux.mtime_nsec = inode->i_mtime.tv_nsec;
                ci->fscache = fscache_acquire_cookie(fsc->fscache,
                                                     &ceph_fscache_inode_object_def,
                                                     &ci->i_vino, sizeof(ci->i_vino),
                                                     &aux, sizeof(aux),
                                                     ci, i_size_read(inode), false);
        }
        inode_unlock(inode);
}

void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
        struct fscache_cookie* cookie;

        if ((cookie = ci->fscache) == NULL)
                return;

        ci->fscache = NULL;

        fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
        fscache_relinquish_cookie(cookie, &ci->i_vino, false);
}

static bool ceph_fscache_can_enable(void *data)
{
        struct inode *inode = data;
        return !inode_is_open_for_write(inode);
}

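/*
 * Called at file open time: the inode's cookie is disabled while anyone
 * holds the file open for write, and (re-)enabled otherwise, matching the
 * read-only caching policy above.
 */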
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (!fscache_cookie_valid(ci->fscache))
                return;

        if (inode_is_open_for_write(inode)) {
                dout("fscache_file_set_cookie %p %p disabling cache\n",
                     inode, filp);
                fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
                fscache_uncache_all_inode_pages(ci->fscache, inode);
        } else {
                fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
                                      ceph_fscache_can_enable, inode);
                if (fscache_cookie_enabled(ci->fscache)) {
                        dout("fscache_file_set_cookie %p %p enabling cache\n",
                             inode, filp);
                }
        }
}

static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
        if (!error)
                SetPageUptodate(page);

        unlock_page(page);
}

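/*
 * The cache for an inode is only considered valid while no invalidation has
 * bumped i_rdcache_gen past the generation last seen by fscache
 * (i_fscache_gen); see ceph_fscache_revalidate_cookie() below.
 */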
static inline bool cache_valid(struct ceph_inode_info *ci)
{
        return ci->i_fscache_gen == ci->i_rdcache_gen;
}


/* Attempt to read from the fscache.
 *
 * This function is called from the readpage_nounlock context. DO NOT attempt to
 * unlock the page here (or in the callback).
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!cache_valid(ci))
                return -ENOBUFS;

        ret = fscache_read_or_alloc_page(ci->fscache, page,
                                         ceph_readpage_from_fscache_complete, NULL,
                                         GFP_KERNEL);

        switch (ret) {
        case 0: /* Page found */
                dout("page read submitted\n");
                return 0;
        case -ENOBUFS: /* Pages were not found, and can't be */
        case -ENODATA: /* Pages were not found */
                dout("page/inode not in cache\n");
                return ret;
        default:
                dout("%s: unknown error ret = %i\n", __func__, ret);
                return ret;
        }
}

int ceph_readpages_from_fscache(struct inode *inode,
                                struct address_space *mapping,
                                struct list_head *pages,
                                unsigned *nr_pages)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!cache_valid(ci))
                return -ENOBUFS;

        ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
                                          ceph_readpage_from_fscache_complete,
                                          NULL, mapping_gfp_mask(mapping));

        switch (ret) {
        case 0: /* All pages found */
                dout("all-page read submitted\n");
                return 0;
        case -ENOBUFS: /* Some pages were not found, and can't be */
        case -ENODATA: /* Some pages were not found */
                dout("page/inode not in cache\n");
                return ret;
        default:
                dout("%s: unknown error ret = %i\n", __func__, ret);
                return ret;
        }
}

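/*
 * Store a freshly read, uptodate page into the cache.  If the write cannot
 * be queued, the page is uncached again so it is not left marked as being
 * written to the cache.
 */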
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!PageFsCache(page))
                return;

        if (!cache_valid(ci))
                return;

        ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
                                 GFP_KERNEL);
        if (ret)
                fscache_uncache_page(ci->fscache, page);
}

void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (!PageFsCache(page))
                return;

        fscache_wait_on_page_write(ci->fscache, page);
        fscache_uncache_page(ci->fscache, page);
}

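/*
 * Tear down the per-filesystem cookie: drop the fsid entry from the
 * registration list, then relinquish the cookie itself.
 */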
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
        if (fscache_cookie_valid(fsc->fscache)) {
                struct ceph_fscache_entry *ent;
                bool found = false;

                mutex_lock(&ceph_fscache_lock);
                list_for_each_entry(ent, &ceph_fscache_list, list) {
                        if (ent->fscache == fsc->fscache) {
                                list_del(&ent->list);
                                kfree(ent);
                                found = true;
                                break;
                        }
                }
                WARN_ON_ONCE(!found);
                mutex_unlock(&ceph_fscache_lock);

                __fscache_relinquish_cookie(fsc->fscache, NULL, false);
        }
        fsc->fscache = NULL;
}

/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
        if (cache_valid(ci))
                return;

        /* reuse i_truncate_mutex. There should be no pending
         * truncate while the caller holds CEPH_CAP_FILE_RD */
        mutex_lock(&ci->i_truncate_mutex);
        if (!cache_valid(ci)) {
                if (fscache_check_consistency(ci->fscache, &ci->i_vino))
                        fscache_invalidate(ci->fscache);
                spin_lock(&ci->i_ceph_lock);
                ci->i_fscache_gen = ci->i_rdcache_gen;
                spin_unlock(&ci->i_ceph_lock);
        }
        mutex_unlock(&ci->i_truncate_mutex);
}