blob: 2f5cb6bc78e1d00e029f029009b279684aff711a [file] [log] [blame]
Thomas Gleixner1f327612019-05-28 09:57:16 -07001// SPDX-License-Identifier: GPL-2.0-only
Milosz Tanski99ccbd22013-08-21 17:29:54 -04002/*
3 * Ceph cache definitions.
4 *
5 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
6 * Written by Milosz Tanski (milosz@adfin.com)
Milosz Tanski99ccbd22013-08-21 17:29:54 -04007 */
8
Ilya Dryomov48f930e2019-09-05 17:29:29 +02009#include <linux/ceph/ceph_debug.h>
10
David Howells82995cc2019-03-25 16:38:32 +000011#include <linux/fs_context.h>
Milosz Tanski99ccbd22013-08-21 17:29:54 -040012#include "super.h"
13#include "cache.h"
14
/*
 * Auxiliary data stored with each inode cookie.  A snapshot of the
 * inode's version and mtime; compared in ceph_fscache_inode_check_aux()
 * to detect stale cached data.
 */
struct ceph_aux_inode {
	u64 	version;	/* ci->i_version at cookie acquisition */
	u64	mtime_sec;	/* inode->i_mtime, split into sec/nsec */
	u64	mtime_nsec;
};
20
/* Top-level fscache "netfs" registration for ceph. */
struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};
25
/* Protects ceph_fscache_list */
static DEFINE_MUTEX(ceph_fscache_lock);
/* All fsid-level cookies currently registered, one entry per mount */
static LIST_HEAD(ceph_fscache_list);
28
/*
 * One registered fsid-level cookie.  The fsid plus the optional
 * user-supplied uniquifier ("fsc=" mount option) together form the
 * fscache index key, so they must be adjacent and last in the struct.
 */
struct ceph_fscache_entry {
	struct list_head list;		/* link in ceph_fscache_list */
	struct fscache_cookie *fscache;
	size_t uniq_len;		/* length of uniquifier[], may be 0 */
	/* The following members must be last */
	struct ceph_fsid fsid;
	char uniquifier[];		/* NOT NUL-terminated; length above */
};
37
Milosz Tanski99ccbd22013-08-21 17:29:54 -040038static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
39 .name = "CEPH.fsid",
40 .type = FSCACHE_COOKIE_TYPE_INDEX,
Milosz Tanski99ccbd22013-08-21 17:29:54 -040041};
42
Chengguang Xu57a35df2018-03-10 20:32:05 +080043int __init ceph_fscache_register(void)
Milosz Tanski99ccbd22013-08-21 17:29:54 -040044{
45 return fscache_register_netfs(&ceph_cache_netfs);
46}
47
Milosz Tanski971f0bd2013-09-06 15:13:18 +000048void ceph_fscache_unregister(void)
Milosz Tanski99ccbd22013-08-21 17:29:54 -040049{
50 fscache_unregister_netfs(&ceph_cache_netfs);
51}
52
David Howells82995cc2019-03-25 16:38:32 +000053int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
Milosz Tanski99ccbd22013-08-21 17:29:54 -040054{
Yan, Zheng1d8f8362017-06-27 11:57:56 +080055 const struct ceph_fsid *fsid = &fsc->client->fsid;
56 const char *fscache_uniq = fsc->mount_options->fscache_uniq;
57 size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
58 struct ceph_fscache_entry *ent;
59 int err = 0;
60
61 mutex_lock(&ceph_fscache_lock);
62 list_for_each_entry(ent, &ceph_fscache_list, list) {
63 if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
64 continue;
65 if (ent->uniq_len != uniq_len)
66 continue;
67 if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
68 continue;
69
Al Virod53d0f72019-12-21 21:31:52 -050070 errorfc(fc, "fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
David Howells82995cc2019-03-25 16:38:32 +000071 fsid);
Yan, Zheng1d8f8362017-06-27 11:57:56 +080072 err = -EBUSY;
73 goto out_unlock;
74 }
75
76 ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
77 if (!ent) {
78 err = -ENOMEM;
79 goto out_unlock;
80 }
81
David Howells402cb8d2018-04-04 13:41:28 +010082 memcpy(&ent->fsid, fsid, sizeof(*fsid));
83 if (uniq_len > 0) {
84 memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
85 ent->uniq_len = uniq_len;
86 }
87
Milosz Tanski99ccbd22013-08-21 17:29:54 -040088 fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
89 &ceph_fscache_fsid_object_def,
David Howells402cb8d2018-04-04 13:41:28 +010090 &ent->fsid, sizeof(ent->fsid) + uniq_len,
91 NULL, 0,
David Howellsee1235a2018-04-04 13:41:28 +010092 fsc, 0, true);
Milosz Tanski99ccbd22013-08-21 17:29:54 -040093
Yan, Zheng1d8f8362017-06-27 11:57:56 +080094 if (fsc->fscache) {
Yan, Zheng1d8f8362017-06-27 11:57:56 +080095 ent->fscache = fsc->fscache;
96 list_add_tail(&ent->list, &ceph_fscache_list);
97 } else {
98 kfree(ent);
Al Virod53d0f72019-12-21 21:31:52 -050099 errorfc(fc, "unable to register fscache cookie for fsid %pU",
Yan, Zheng1d8f8362017-06-27 11:57:56 +0800100 fsid);
101 /* all other fs ignore this error */
102 }
103out_unlock:
104 mutex_unlock(&ceph_fscache_lock);
105 return err;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400106}
107
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400108static enum fscache_checkaux ceph_fscache_inode_check_aux(
David Howellsee1235a2018-04-04 13:41:28 +0100109 void *cookie_netfs_data, const void *data, uint16_t dlen,
110 loff_t object_size)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400111{
112 struct ceph_aux_inode aux;
113 struct ceph_inode_info* ci = cookie_netfs_data;
114 struct inode* inode = &ci->vfs_inode;
115
David Howellsee1235a2018-04-04 13:41:28 +0100116 if (dlen != sizeof(aux) ||
117 i_size_read(inode) != object_size)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400118 return FSCACHE_CHECKAUX_OBSOLETE;
119
120 memset(&aux, 0, sizeof(aux));
Yan, Zhengf6973c02016-05-20 16:57:29 +0800121 aux.version = ci->i_version;
Arnd Bergmann9bbeab42018-07-13 22:18:36 +0200122 aux.mtime_sec = inode->i_mtime.tv_sec;
123 aux.mtime_nsec = inode->i_mtime.tv_nsec;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400124
125 if (memcmp(data, &aux, sizeof(aux)) != 0)
126 return FSCACHE_CHECKAUX_OBSOLETE;
127
Chengguang Xu4c069a52018-01-30 16:29:17 +0800128 dout("ceph inode 0x%p cached okay\n", ci);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400129 return FSCACHE_CHECKAUX_OKAY;
130}
131
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400132static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
133 .name = "CEPH.inode",
134 .type = FSCACHE_COOKIE_TYPE_DATAFILE,
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400135 .check_aux = ceph_fscache_inode_check_aux,
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400136};
137
Yan, Zheng46b59b22016-05-18 15:25:03 +0800138void ceph_fscache_register_inode_cookie(struct inode *inode)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400139{
Yan, Zheng46b59b22016-05-18 15:25:03 +0800140 struct ceph_inode_info *ci = ceph_inode(inode);
141 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
David Howells402cb8d2018-04-04 13:41:28 +0100142 struct ceph_aux_inode aux;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400143
144 /* No caching for filesystem */
Markus Elfringd37b1d92017-08-20 20:22:02 +0200145 if (!fsc->fscache)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400146 return;
147
148 /* Only cache for regular files that are read only */
Yan, Zheng46b59b22016-05-18 15:25:03 +0800149 if (!S_ISREG(inode->i_mode))
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400150 return;
151
Yan, Zheng46b59b22016-05-18 15:25:03 +0800152 inode_lock_nested(inode, I_MUTEX_CHILD);
153 if (!ci->fscache) {
David Howells402cb8d2018-04-04 13:41:28 +0100154 memset(&aux, 0, sizeof(aux));
155 aux.version = ci->i_version;
Arnd Bergmann9bbeab42018-07-13 22:18:36 +0200156 aux.mtime_sec = inode->i_mtime.tv_sec;
157 aux.mtime_nsec = inode->i_mtime.tv_nsec;
Yan, Zheng46b59b22016-05-18 15:25:03 +0800158 ci->fscache = fscache_acquire_cookie(fsc->fscache,
David Howells402cb8d2018-04-04 13:41:28 +0100159 &ceph_fscache_inode_object_def,
160 &ci->i_vino, sizeof(ci->i_vino),
161 &aux, sizeof(aux),
David Howellsee1235a2018-04-04 13:41:28 +0100162 ci, i_size_read(inode), false);
Yan, Zheng46b59b22016-05-18 15:25:03 +0800163 }
Al Viro59551022016-01-22 15:40:57 -0500164 inode_unlock(inode);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400165}
166
167void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
168{
169 struct fscache_cookie* cookie;
170
171 if ((cookie = ci->fscache) == NULL)
172 return;
173
174 ci->fscache = NULL;
175
176 fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
David Howells402cb8d2018-04-04 13:41:28 +0100177 fscache_relinquish_cookie(cookie, &ci->i_vino, false);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400178}
179
Yan, Zheng46b59b22016-05-18 15:25:03 +0800180static bool ceph_fscache_can_enable(void *data)
181{
182 struct inode *inode = data;
183 return !inode_is_open_for_write(inode);
184}
185
/*
 * Called at file open time: disable the cookie (and drop cached pages)
 * if the inode is open for write, otherwise try to enable it — writes
 * would make the cached copy inconsistent.
 */
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!fscache_cookie_valid(ci->fscache))
		return;

	if (inode_is_open_for_write(inode)) {
		dout("fscache_file_set_cookie %p %p disabling cache\n",
		     inode, filp);
		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
		fscache_uncache_all_inode_pages(ci->fscache, inode);
	} else {
		/* can_enable rechecks open-for-write under fscache's lock */
		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
				      ceph_fscache_can_enable, inode);
		if (fscache_cookie_enabled(ci->fscache)) {
			dout("fscache_file_set_cookie %p %p enabling cache\n",
			     inode, filp);
		}
	}
}
207
Yan, Zhengdd2bc472017-08-04 11:22:31 +0800208static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400209{
210 if (!error)
211 SetPageUptodate(page);
212
213 unlock_page(page);
214}
215
Zhang Zhuoyu3b33f692016-03-25 05:18:39 -0400216static inline bool cache_valid(struct ceph_inode_info *ci)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400217{
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800218 return ci->i_fscache_gen == ci->i_rdcache_gen;
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400219}
220
221
/* Attempt to read from the fscache.
 *
 * This function is called from the readpage_nounlock context.  DO NOT unlock
 * the page here: the completion callback (ceph_readpage_from_fscache_complete)
 * is responsible for unlocking it.
 */
/*
 * Try to satisfy a single-page read from the fscache.
 *
 * Returns 0 when the read was submitted (completion reported through
 * ceph_readpage_from_fscache_complete), -ENOBUFS/-ENODATA when the page
 * is not in the cache, or another negative fscache error.
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_readpage_from_fscache_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* Page found */
		dout("page read submitted\n");
		return 0;
	case -ENOBUFS: /* Pages were not found, and can't be */
	case -ENODATA: /* Pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}
252
/*
 * Try to satisfy a multi-page readahead from the fscache.
 *
 * Pages that were submitted are removed from @pages and *nr_pages is
 * updated by fscache; any remainder must be fetched by the caller.
 * Return values mirror ceph_readpage_from_fscache().
 */
int ceph_readpages_from_fscache(struct inode *inode,
				struct address_space *mapping,
				struct list_head *pages,
				unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_readpage_from_fscache_complete,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
	case 0: /* All pages found */
		dout("all-page read submitted\n");
		return 0;
	case -ENOBUFS: /* Some pages were not found, and can't be */
	case -ENODATA: /* some pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}
281
/*
 * Write a freshly-read page into the fscache.  Best-effort: on failure
 * the page is simply dropped from the cache.
 */
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	/* only pages fscache marked for caching are stored */
	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
				 GFP_KERNEL);
	if (ret)
		 fscache_uncache_page(ci->fscache, page);
}
298
/*
 * Remove a page from the fscache, first waiting for any in-flight
 * cache write of that page to complete.
 */
void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	/* nothing to do if the page was never handed to fscache */
	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}
309
/*
 * Release the per-fsid cookie at unmount time and remove its entry
 * from the global registration list (counterpart of
 * ceph_fscache_register_fs()).
 */
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		/* every valid cookie must have been put on the list */
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}
332
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800333/*
334 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
335 */
336void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400337{
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800338 if (cache_valid(ci))
Milosz Tanskie81568e2013-09-05 18:29:03 +0000339 return;
340
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800341 /* resue i_truncate_mutex. There should be no pending
342 * truncate while the caller holds CEPH_CAP_FILE_RD */
343 mutex_lock(&ci->i_truncate_mutex);
344 if (!cache_valid(ci)) {
David Howells402cb8d2018-04-04 13:41:28 +0100345 if (fscache_check_consistency(ci->fscache, &ci->i_vino))
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800346 fscache_invalidate(ci->fscache);
347 spin_lock(&ci->i_ceph_lock);
348 ci->i_fscache_gen = ci->i_rdcache_gen;
349 spin_unlock(&ci->i_ceph_lock);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400350 }
Yan, Zhengf7f7e7a2016-05-18 20:31:55 +0800351 mutex_unlock(&ci->i_truncate_mutex);
Milosz Tanski99ccbd22013-08-21 17:29:54 -0400352}