blob: b2ec29eeb4c4fda7ed65efe6ee07503dbdfb6f53 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 */

#include <linux/ceph/ceph_debug.h>

#include "super.h"
#include "cache.h"
14struct ceph_aux_inode {
Arnd Bergmann9bbeab42018-07-13 22:18:36 +020015 u64 version;
16 u64 mtime_sec;
17 u64 mtime_nsec;
Milosz Tanski99ccbd22013-08-21 17:29:54 -040018};
19
20struct fscache_netfs ceph_cache_netfs = {
21 .name = "ceph",
22 .version = 0,
23};
24
/* Serializes access to ceph_fscache_list below. */
static DEFINE_MUTEX(ceph_fscache_lock);
/* All registered fs-level fscache cookies, one entry per mount. */
static LIST_HEAD(ceph_fscache_list);
27
28struct ceph_fscache_entry {
29 struct list_head list;
30 struct fscache_cookie *fscache;
Yan, Zheng1d8f8362017-06-27 11:57:56 +080031 size_t uniq_len;
David Howells402cb8d2018-04-04 13:41:28 +010032 /* The following members must be last */
33 struct ceph_fsid fsid;
Yan, Zheng1d8f8362017-06-27 11:57:56 +080034 char uniquifier[0];
35};
36
Milosz Tanski99ccbd22013-08-21 17:29:54 -040037static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
38 .name = "CEPH.fsid",
39 .type = FSCACHE_COOKIE_TYPE_INDEX,
Milosz Tanski99ccbd22013-08-21 17:29:54 -040040};
41
Chengguang Xu57a35df2018-03-10 20:32:05 +080042int __init ceph_fscache_register(void)
Milosz Tanski99ccbd22013-08-21 17:29:54 -040043{
44 return fscache_register_netfs(&ceph_cache_netfs);
45}
46
Milosz Tanski971f0bd2013-09-06 15:13:18 +000047void ceph_fscache_unregister(void)
Milosz Tanski99ccbd22013-08-21 17:29:54 -040048{
49 fscache_unregister_netfs(&ceph_cache_netfs);
50}
51
52int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
53{
Yan, Zheng1d8f8362017-06-27 11:57:56 +080054 const struct ceph_fsid *fsid = &fsc->client->fsid;
55 const char *fscache_uniq = fsc->mount_options->fscache_uniq;
56 size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
57 struct ceph_fscache_entry *ent;
58 int err = 0;
59
60 mutex_lock(&ceph_fscache_lock);
61 list_for_each_entry(ent, &ceph_fscache_list, list) {
62 if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
63 continue;
64 if (ent->uniq_len != uniq_len)
65 continue;
66 if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
67 continue;
68
69 pr_err("fscache cookie already registered for fsid %pU\n", fsid);
70 pr_err(" use fsc=%%s mount option to specify a uniquifier\n");
71 err = -EBUSY;
72 goto out_unlock;
73 }
74
75 ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
76 if (!ent) {
77 err = -ENOMEM;
78 goto out_unlock;
79 }
80
David Howells402cb8d2018-04-04 13:41:28 +010081 memcpy(&ent->fsid, fsid, sizeof(*fsid));
82 if (uniq_len > 0) {
83 memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
84 ent->uniq_len = uniq_len;
85 }
86
Milosz Tanski99ccbd22013-08-21 17:29:54 -040087 fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
88 &ceph_fscache_fsid_object_def,
David Howells402cb8d2018-04-04 13:41:28 +010089 &ent->fsid, sizeof(ent->fsid) + uniq_len,
90 NULL, 0,
David Howellsee1235a2018-04-04 13:41:28 +010091 fsc, 0, true);
Milosz Tanski99ccbd22013-08-21 17:29:54 -040092
Yan, Zheng1d8f8362017-06-27 11:57:56 +080093 if (fsc->fscache) {
Yan, Zheng1d8f8362017-06-27 11:57:56 +080094 ent->fscache = fsc->fscache;
95 list_add_tail(&ent->list, &ceph_fscache_list);
96 } else {
97 kfree(ent);
98 pr_err("unable to register fscache cookie for fsid %pU\n",
99 fsid);
100 /* all other fs ignore this error */
101 }
102out_unlock:
103 mutex_unlock(&ceph_fscache_lock);
104 return err;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400105}
106
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400107static enum fscache_checkaux ceph_fscache_inode_check_aux(
David Howellsee1235a2018-04-04 13:41:28 +0100108 void *cookie_netfs_data, const void *data, uint16_t dlen,
109 loff_t object_size)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400110{
111 struct ceph_aux_inode aux;
112 struct ceph_inode_info* ci = cookie_netfs_data;
113 struct inode* inode = &ci->vfs_inode;
114
David Howellsee1235a2018-04-04 13:41:28 +0100115 if (dlen != sizeof(aux) ||
116 i_size_read(inode) != object_size)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400117 return FSCACHE_CHECKAUX_OBSOLETE;
118
119 memset(&aux, 0, sizeof(aux));
Yan, Zhengf6973c02016-05-20 16:57:29 +0800120 aux.version = ci->i_version;
Arnd Bergmann9bbeab42018-07-13 22:18:36 +0200121 aux.mtime_sec = inode->i_mtime.tv_sec;
122 aux.mtime_nsec = inode->i_mtime.tv_nsec;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400123
124 if (memcmp(data, &aux, sizeof(aux)) != 0)
125 return FSCACHE_CHECKAUX_OBSOLETE;
126
Chengguang Xu4c069a52018-01-30 16:29:17 +0800127 dout("ceph inode 0x%p cached okay\n", ci);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400128 return FSCACHE_CHECKAUX_OKAY;
129}
130
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400131static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
132 .name = "CEPH.inode",
133 .type = FSCACHE_COOKIE_TYPE_DATAFILE,
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400134 .check_aux = ceph_fscache_inode_check_aux,
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400135};
136
Yan, Zheng46b59b22016-05-18 15:25:03 +0800137void ceph_fscache_register_inode_cookie(struct inode *inode)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400138{
Yan, Zheng46b59b22016-05-18 15:25:03 +0800139 struct ceph_inode_info *ci = ceph_inode(inode);
140 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
David Howells402cb8d2018-04-04 13:41:28 +0100141 struct ceph_aux_inode aux;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400142
143 /* No caching for filesystem */
Markus Elfringd37b1d92017-08-20 20:22:02 +0200144 if (!fsc->fscache)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400145 return;
146
147 /* Only cache for regular files that are read only */
Yan, Zheng46b59b22016-05-18 15:25:03 +0800148 if (!S_ISREG(inode->i_mode))
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400149 return;
150
Yan, Zheng46b59b22016-05-18 15:25:03 +0800151 inode_lock_nested(inode, I_MUTEX_CHILD);
152 if (!ci->fscache) {
David Howells402cb8d2018-04-04 13:41:28 +0100153 memset(&aux, 0, sizeof(aux));
154 aux.version = ci->i_version;
Arnd Bergmann9bbeab42018-07-13 22:18:36 +0200155 aux.mtime_sec = inode->i_mtime.tv_sec;
156 aux.mtime_nsec = inode->i_mtime.tv_nsec;
Yan, Zheng46b59b22016-05-18 15:25:03 +0800157 ci->fscache = fscache_acquire_cookie(fsc->fscache,
David Howells402cb8d2018-04-04 13:41:28 +0100158 &ceph_fscache_inode_object_def,
159 &ci->i_vino, sizeof(ci->i_vino),
160 &aux, sizeof(aux),
David Howellsee1235a2018-04-04 13:41:28 +0100161 ci, i_size_read(inode), false);
Yan, Zheng46b59b22016-05-18 15:25:03 +0800162 }
Al Viro59551022016-01-22 15:40:57 -0500163 inode_unlock(inode);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400164}
165
166void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
167{
168 struct fscache_cookie* cookie;
169
170 if ((cookie = ci->fscache) == NULL)
171 return;
172
173 ci->fscache = NULL;
174
175 fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
David Howells402cb8d2018-04-04 13:41:28 +0100176 fscache_relinquish_cookie(cookie, &ci->i_vino, false);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400177}
178
Yan, Zheng46b59b22016-05-18 15:25:03 +0800179static bool ceph_fscache_can_enable(void *data)
180{
181 struct inode *inode = data;
182 return !inode_is_open_for_write(inode);
183}
184
185void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
186{
187 struct ceph_inode_info *ci = ceph_inode(inode);
188
189 if (!fscache_cookie_valid(ci->fscache))
190 return;
191
192 if (inode_is_open_for_write(inode)) {
193 dout("fscache_file_set_cookie %p %p disabling cache\n",
194 inode, filp);
David Howells402cb8d2018-04-04 13:41:28 +0100195 fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
Yan, Zheng46b59b22016-05-18 15:25:03 +0800196 fscache_uncache_all_inode_pages(ci->fscache, inode);
197 } else {
David Howellsee1235a2018-04-04 13:41:28 +0100198 fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
David Howells402cb8d2018-04-04 13:41:28 +0100199 ceph_fscache_can_enable, inode);
Yan, Zheng46b59b22016-05-18 15:25:03 +0800200 if (fscache_cookie_enabled(ci->fscache)) {
Colin Ian King0fbc5362016-12-29 20:19:32 +0000201 dout("fscache_file_set_cookie %p %p enabling cache\n",
Yan, Zheng46b59b22016-05-18 15:25:03 +0800202 inode, filp);
203 }
204 }
205}
206
/*
 * Completion callback for fscache page reads: mark the page up to date on
 * success and unlock it either way.
 */
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}
214
Zhang Zhuoyu3b33f692016-03-25 05:18:39 -0400215static inline bool cache_valid(struct ceph_inode_info *ci)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400216{
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800217 return ci->i_fscache_gen == ci->i_rdcache_gen;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400218}
219
220
221/* Atempt to read from the fscache,
222 *
223 * This function is called from the readpage_nounlock context. DO NOT attempt to
224 * unlock the page here (or in the callback).
225 */
226int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
227{
228 struct ceph_inode_info *ci = ceph_inode(inode);
229 int ret;
230
231 if (!cache_valid(ci))
232 return -ENOBUFS;
233
234 ret = fscache_read_or_alloc_page(ci->fscache, page,
Yan, Zhengdd2bc472017-08-04 11:22:31 +0800235 ceph_readpage_from_fscache_complete, NULL,
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400236 GFP_KERNEL);
237
238 switch (ret) {
239 case 0: /* Page found */
240 dout("page read submitted\n");
241 return 0;
242 case -ENOBUFS: /* Pages were not found, and can't be */
243 case -ENODATA: /* Pages were not found */
244 dout("page/inode not in cache\n");
245 return ret;
246 default:
247 dout("%s: unknown error ret = %i\n", __func__, ret);
248 return ret;
249 }
250}
251
252int ceph_readpages_from_fscache(struct inode *inode,
253 struct address_space *mapping,
254 struct list_head *pages,
255 unsigned *nr_pages)
256{
257 struct ceph_inode_info *ci = ceph_inode(inode);
258 int ret;
259
260 if (!cache_valid(ci))
261 return -ENOBUFS;
262
263 ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
Yan, Zhengdd2bc472017-08-04 11:22:31 +0800264 ceph_readpage_from_fscache_complete,
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400265 NULL, mapping_gfp_mask(mapping));
266
267 switch (ret) {
268 case 0: /* All pages found */
269 dout("all-page read submitted\n");
270 return 0;
271 case -ENOBUFS: /* Some pages were not found, and can't be */
272 case -ENODATA: /* some pages were not found */
273 dout("page/inode not in cache\n");
274 return ret;
275 default:
276 dout("%s: unknown error ret = %i\n", __func__, ret);
277 return ret;
278 }
279}
280
281void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
282{
283 struct ceph_inode_info *ci = ceph_inode(inode);
284 int ret;
285
Milosz Tanski9b8dd1e2013-09-03 19:11:01 -0400286 if (!PageFsCache(page))
287 return;
288
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400289 if (!cache_valid(ci))
290 return;
291
David Howellsee1235a2018-04-04 13:41:28 +0100292 ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
293 GFP_KERNEL);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400294 if (ret)
295 fscache_uncache_page(ci->fscache, page);
296}
297
298void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
299{
300 struct ceph_inode_info *ci = ceph_inode(inode);
301
Milosz Tanskiffc79662013-09-25 11:18:14 -0400302 if (!PageFsCache(page))
303 return;
304
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400305 fscache_wait_on_page_write(ci->fscache, page);
306 fscache_uncache_page(ci->fscache, page);
307}
308
309void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
310{
Yan, Zheng1d8f8362017-06-27 11:57:56 +0800311 if (fscache_cookie_valid(fsc->fscache)) {
312 struct ceph_fscache_entry *ent;
313 bool found = false;
314
315 mutex_lock(&ceph_fscache_lock);
316 list_for_each_entry(ent, &ceph_fscache_list, list) {
317 if (ent->fscache == fsc->fscache) {
318 list_del(&ent->list);
319 kfree(ent);
320 found = true;
321 break;
322 }
323 }
324 WARN_ON_ONCE(!found);
325 mutex_unlock(&ceph_fscache_lock);
326
David Howells402cb8d2018-04-04 13:41:28 +0100327 __fscache_relinquish_cookie(fsc->fscache, NULL, false);
Yan, Zheng1d8f8362017-06-27 11:57:56 +0800328 }
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400329 fsc->fscache = NULL;
330}
331
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800332/*
333 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
334 */
335void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400336{
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800337 if (cache_valid(ci))
Milosz Tanskie81568e2013-09-05 18:29:03 +0000338 return;
339
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800340 /* resue i_truncate_mutex. There should be no pending
341 * truncate while the caller holds CEPH_CAP_FILE_RD */
342 mutex_lock(&ci->i_truncate_mutex);
343 if (!cache_valid(ci)) {
David Howells402cb8d2018-04-04 13:41:28 +0100344 if (fscache_check_consistency(ci->fscache, &ci->i_vino))
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800345 fscache_invalidate(ci->fscache);
346 spin_lock(&ci->i_ceph_lock);
347 ci->i_fscache_gen = ci->i_rdcache_gen;
348 spin_unlock(&ci->i_ceph_lock);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400349 }
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800350 mutex_unlock(&ci->i_truncate_mutex);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400351}