/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_SUPER_H
#define _FS_CEPH_SUPER_H

#include <linux/ceph/ceph_debug.h>

#include <asm/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/mempool.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include <linux/refcount.h>
#include <linux/security.h>

#include <linux/ceph/libceph.h>

#ifdef CONFIG_CEPH_FSCACHE
#define FSCACHE_USE_NEW_IO_API
#include <linux/fscache.h>
#endif

/* f_type in struct statfs */
#define CEPH_SUPER_MAGIC 0x00c36400

/* large granularity for statfs utilization stats to facilitate
 * large volume sizes on 32-bit machines. */
#define CEPH_BLOCK_SHIFT    22  /* 4 MB */
#define CEPH_BLOCK          (1 << CEPH_BLOCK_SHIFT)
#define CEPH_4K_BLOCK_SHIFT 12  /* 4 KB */

#define CEPH_MOUNT_OPT_CLEANRECOVER    (1<<1) /* auto reconnect (clean mode) after blocklisted */
#define CEPH_MOUNT_OPT_DIRSTAT         (1<<4) /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES          (1<<5) /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR  (1<<7) /* no dcache readdir */
#define CEPH_MOUNT_OPT_INO32           (1<<8) /* 32 bit inos */
#define CEPH_MOUNT_OPT_DCACHE          (1<<9) /* use dcache for readdir etc */
#define CEPH_MOUNT_OPT_FSCACHE         (1<<10) /* use fscache */
#define CEPH_MOUNT_OPT_NOPOOLPERM      (1<<11) /* no pool permission check */
#define CEPH_MOUNT_OPT_MOUNTWAIT       (1<<12) /* mount waits if no mds is up */
#define CEPH_MOUNT_OPT_NOQUOTADF       (1<<13) /* no root dir quota in statfs */
#define CEPH_MOUNT_OPT_NOCOPYFROM      (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_ASYNC_DIROPS    (1<<15) /* allow async directory ops */

#define CEPH_MOUNT_OPT_DEFAULT                  \
        (CEPH_MOUNT_OPT_DCACHE |                \
         CEPH_MOUNT_OPT_NOCOPYFROM |            \
         CEPH_MOUNT_OPT_ASYNC_DIROPS)

#define ceph_set_mount_opt(fsc, opt) \
        (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
#define ceph_clear_mount_opt(fsc, opt) \
        (fsc)->mount_options->flags &= ~CEPH_MOUNT_OPT_##opt
#define ceph_test_mount_opt(fsc, opt) \
        (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))

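/*
 * Usage sketch (illustrative, not part of the kernel sources): the
 * ceph_*_mount_opt() macros take the option name without the
 * CEPH_MOUNT_OPT_ prefix; "fsc" is assumed to be a valid
 * struct ceph_fs_client pointer.
 *
 *        if (!ceph_test_mount_opt(fsc, NOASYNCREADDIR))
 *                ceph_set_mount_opt(fsc, DCACHE);
 */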
/* max size of osd read request, limited by libceph */
#define CEPH_MAX_READ_SIZE              CEPH_MSG_MAX_DATA_LEN
/* osd has a configurable limitation of max write size.
 * CEPH_MSG_MAX_DATA_LEN should be small enough. */
#define CEPH_MAX_WRITE_SIZE             CEPH_MSG_MAX_DATA_LEN
#define CEPH_RASIZE_DEFAULT             (8192*1024)    /* max readahead */
#define CEPH_MAX_READDIR_DEFAULT        1024
#define CEPH_MAX_READDIR_BYTES_DEFAULT  (512*1024)
#define CEPH_SNAPDIRNAME_DEFAULT        ".snap"

/*
 * Delay telling the MDS we no longer want caps, in case we reopen
 * the file.  Delay a minimum amount of time, even if we send a cap
 * message for some other reason.  Otherwise, take the opportunity to
 * update the MDS to avoid sending another message later.
 */
#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT      5  /* cap release delay */
#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT     60  /* cap release delay */

struct ceph_mount_options {
        unsigned int flags;

        unsigned int wsize;            /* max write size */
        unsigned int rsize;            /* max read size */
        unsigned int rasize;           /* max readahead */
        unsigned int congestion_kb;    /* max writeback in flight */
        unsigned int caps_wanted_delay_min, caps_wanted_delay_max;
        int caps_max;
        unsigned int max_readdir;       /* max readdir result (entries) */
        unsigned int max_readdir_bytes; /* max readdir result (bytes) */

        bool new_dev_syntax;

        /*
         * everything above this point can be memcmp'd; everything below
         * is handled in compare_mount_options()
         */

        char *snapdir_name;   /* default ".snap" */
        char *mds_namespace;  /* default NULL */
        char *server_path;    /* default NULL (means "/") */
        char *fscache_uniq;   /* default NULL */
        char *mon_addr;
};

struct ceph_fs_client {
        struct super_block *sb;

        struct list_head metric_wakeup;

        struct ceph_mount_options *mount_options;
        struct ceph_client *client;

        int mount_state;

        bool blocklisted;

        bool have_copy_from2;

        u32 filp_gen;
        loff_t max_file_size;

        struct ceph_mds_client *mdsc;

        atomic_long_t writeback_count;

        struct workqueue_struct *inode_wq;
        struct workqueue_struct *cap_wq;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_dentry_lru, *debugfs_caps;
        struct dentry *debugfs_congestion_kb;
        struct dentry *debugfs_bdi;
        struct dentry *debugfs_mdsc, *debugfs_mdsmap;
        struct dentry *debugfs_status;
        struct dentry *debugfs_mds_sessions;
        struct dentry *debugfs_metrics_dir;
#endif

#ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
#endif
};


/*
 * File i/o capability.  This tracks shared state with the metadata
 * server that allows us to cache or writeback attributes or to read
 * and write data.  For any given inode, we should have one or more
 * capabilities, one issued by each metadata server, and our
 * cumulative access is the OR of all issued capabilities.
 *
 * Each cap is referenced by the inode's i_caps rbtree and by per-mds
 * session capability lists.
 */
struct ceph_cap {
        struct ceph_inode_info *ci;
        struct rb_node ci_node;          /* per-ci cap tree */
        struct ceph_mds_session *session;
        struct list_head session_caps;   /* per-session caplist */
        u64 cap_id;       /* unique cap id (mds provided) */
        union {
                /* in-use caps */
                struct {
                        int issued;       /* latest, from the mds */
                        int implemented;  /* implemented superset of
                                             issued (for revocation) */
                        int mds;          /* mds index for this cap */
                        int mds_wanted;   /* caps wanted from this mds */
                };
                /* caps to release */
                struct {
                        u64 cap_ino;
                        int queue_release;
                };
        };
        u32 seq, issue_seq, mseq;
        u32 cap_gen;      /* active/stale cycle */
        unsigned long last_used;
        struct list_head caps_item;
};

#define CHECK_CAPS_AUTHONLY   1  /* only check auth cap */
#define CHECK_CAPS_FLUSH      2  /* flush any dirty caps */
#define CHECK_CAPS_NOINVAL    4  /* don't invalidate pagecache */

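/*
 * Illustrative sketch (an assumption, not copied from the kernel sources):
 * the CHECK_CAPS_* values above are OR'd together and passed as the "flags"
 * argument of ceph_check_caps(), declared later in this header, e.g.:
 *
 *        ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH, NULL);
 */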
struct ceph_cap_flush {
        u64 tid;
        int caps;
        bool wake; /* wake up flush waiters when finish ? */
        bool is_capsnap; /* true means capsnap */
        struct list_head g_list; // global
        struct list_head i_list; // per inode
};

/*
 * Snapped cap state that is pending flush to mds.  When a snapshot occurs,
 * we first complete any in-process sync writes and writeback any dirty
 * data before flushing the snapped state (tracked here) back to the MDS.
 */
struct ceph_cap_snap {
        refcount_t nref;
        struct list_head ci_item;

        struct ceph_cap_flush cap_flush;

        u64 follows;
        int issued, dirty;
        struct ceph_snap_context *context;

        umode_t mode;
        kuid_t uid;
        kgid_t gid;

        struct ceph_buffer *xattr_blob;
        u64 xattr_version;

        u64 size;
        u64 change_attr;
        struct timespec64 mtime, atime, ctime, btime;
        u64 time_warp_seq;
        u64 truncate_size;
        u32 truncate_seq;
        int writing;   /* a sync write is still in progress */
        int dirty_pages;     /* dirty pages awaiting writeback */
        bool inline_data;
        bool need_flush;
};

static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
{
        if (refcount_dec_and_test(&capsnap->nref)) {
                if (capsnap->xattr_blob)
                        ceph_buffer_put(capsnap->xattr_blob);
                kfree(capsnap);
        }
}

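/*
 * Illustrative sketch (an assumption, not copied from the kernel sources):
 * a cap_snap reference is taken by bumping nref directly and dropped with
 * ceph_put_cap_snap(); the final put releases the xattr blob and frees the
 * structure, as shown above.
 *
 *        refcount_inc(&capsnap->nref);
 *        ...
 *        ceph_put_cap_snap(capsnap);
 */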
/*
 * The frag tree describes how a directory is fragmented, potentially across
 * multiple metadata servers.  It is also used to indicate points where
 * metadata authority is delegated, and whether/where metadata is replicated.
 *
 * A _leaf_ frag will be present in the i_fragtree IFF there is
 * delegation info.  That is, if mds >= 0 || ndist > 0.
 */
#define CEPH_MAX_DIRFRAG_REP 4

struct ceph_inode_frag {
        struct rb_node node;

        /* fragtree state */
        u32 frag;
        int split_by;         /* i.e. 2^(split_by) children */

        /* delegation and replication info */
        int mds;              /* -1 if same authority as parent */
        int ndist;            /* >0 if replicated */
        int dist[CEPH_MAX_DIRFRAG_REP];
};

/*
 * We cache inode xattrs as an encoded blob until they are first used,
 * at which point we parse them into an rbtree.
 */
struct ceph_inode_xattr {
        struct rb_node node;

        const char *name;
        int name_len;
        const char *val;
        int val_len;
        int dirty;

        int should_free_name;
        int should_free_val;
};

/*
 * Ceph dentry state
 */
struct ceph_dentry_info {
        struct dentry *dentry;
        struct ceph_mds_session *lease_session;
        struct list_head lease_list;
        unsigned flags;
        int lease_shared_gen;
        u32 lease_gen;
        u32 lease_seq;
        unsigned long lease_renew_after, lease_renew_from;
        unsigned long time;
        u64 offset;
};

#define CEPH_DENTRY_REFERENCED          1
#define CEPH_DENTRY_LEASE_LIST          2
#define CEPH_DENTRY_SHRINK_LIST         4
#define CEPH_DENTRY_PRIMARY_LINK        8

struct ceph_inode_xattrs_info {
        /*
         * (still encoded) xattr blob. we avoid the overhead of parsing
         * this until someone actually calls getxattr, etc.
         *
         * blob->vec.iov_len == 4 implies there are no xattrs; blob ==
         * NULL means we don't know.
         */
        struct ceph_buffer *blob, *prealloc_blob;

        struct rb_root index;
        bool dirty;
        int count;
        int names_size;
        int vals_size;
        u64 version, index_version;
};

/*
 * Ceph inode.
 */
struct ceph_inode_info {
        struct ceph_vino i_vino;   /* ceph ino + snap */

        spinlock_t i_ceph_lock;

        u64 i_version;
        u64 i_inline_version;
        u32 i_time_warp_seq;

        unsigned long i_ceph_flags;
        atomic64_t i_release_count;
        atomic64_t i_ordered_count;
        atomic64_t i_complete_seq[2];

        struct ceph_dir_layout i_dir_layout;
        struct ceph_file_layout i_layout;
        struct ceph_file_layout i_cached_layout;  // for async creates
        char *i_symlink;

        /* for dirs */
        struct timespec64 i_rctime;
        u64 i_rbytes, i_rfiles, i_rsubdirs, i_rsnaps;
        u64 i_files, i_subdirs;

        /* quotas */
        u64 i_max_bytes, i_max_files;

        s32 i_dir_pin;

        struct rb_root i_fragtree;
        int i_fragtree_nsplits;
        struct mutex i_fragtree_mutex;

        struct ceph_inode_xattrs_info i_xattrs;

        /* capabilities.  protected _both_ by i_ceph_lock and cap->session's
         * s_mutex. */
        struct rb_root i_caps;           /* cap list */
        struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
        unsigned i_dirty_caps, i_flushing_caps;     /* mask of dirtied fields */

        /*
         * Link to the auth cap's session's s_cap_dirty list.  s_cap_dirty
         * is protected by the mdsc->cap_dirty_lock, but each individual item
         * is also protected by the inode's i_ceph_lock.  Walking s_cap_dirty
         * requires the mdsc->cap_dirty_lock.  List presence for an item can
         * be tested under the i_ceph_lock.  Changing anything requires both.
         */
        struct list_head i_dirty_item;

        /*
         * Link to session's s_cap_flushing list.  Protected in a similar
         * fashion to i_dirty_item, but also by the s_mutex for changes.  The
         * s_cap_flushing list can be walked while holding either the s_mutex
         * or mdsc->cap_dirty_lock.  List presence can also be checked while
         * holding the i_ceph_lock for this inode.
         */
        struct list_head i_flushing_item;

        /* we need to track cap writeback on a per-cap-bit basis, to allow
         * overlapping, pipelined cap flushes to the mds.  we can probably
         * reduce the tid to 8 bits if we're concerned about inode size. */
        struct ceph_cap_flush *i_prealloc_cap_flush;
        struct list_head i_cap_flush_list;
        wait_queue_head_t i_cap_wq;      /* threads waiting on a capability */
        unsigned long i_hold_caps_max;   /* jiffies */
        struct list_head i_cap_delay_list;  /* for delayed cap release to mds */
        struct ceph_cap_reservation i_cap_migration_resv;
        struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
        struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
                                                    dirty|flushing caps */
        unsigned i_snap_caps;           /* cap bits for snapped files */

        unsigned long i_last_rd;
        unsigned long i_last_wr;
        int i_nr_by_mode[CEPH_FILE_MODE_BITS];  /* open file counts */

        struct mutex i_truncate_mutex;
        u32 i_truncate_seq;        /* last truncate to smaller size */
        u64 i_truncate_size;       /* and the size we last truncated down to */
        int i_truncate_pending;    /* still need to call vmtruncate */

        u64 i_max_size;            /* max file size authorized by mds */
        u64 i_reported_size;       /* (max_)size reported to or requested of mds */
        u64 i_wanted_max_size;     /* offset we'd like to write to */
        u64 i_requested_max_size;  /* max_size we've requested */

        /* held references to caps */
        int i_pin_ref;
        int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref, i_fx_ref;
        int i_wrbuffer_ref, i_wrbuffer_ref_head;
        atomic_t i_filelock_ref;
        atomic_t i_shared_gen;       /* increment each time we get FILE_SHARED */
        u32 i_rdcache_gen;      /* incremented each time we get FILE_CACHE. */
        u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */

        struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */
        struct list_head i_unsafe_iops;   /* uncommitted mds inode ops */
        spinlock_t i_unsafe_lock;

        union {
                struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
                struct ceph_snapid_map *i_snapid_map; /* snapid -> dev_t */
        };
        struct list_head i_snap_realm_item;
        struct list_head i_snap_flush_item;
        struct timespec64 i_btime;
        struct timespec64 i_snap_btime;

        struct work_struct i_work;
        unsigned long i_work_mask;

#ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
#endif
        struct inode vfs_inode; /* at end */
};

static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
        return container_of(inode, struct ceph_inode_info, vfs_inode);
}

static inline struct ceph_fs_client *
ceph_inode_to_client(const struct inode *inode)
{
        return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}

static inline struct ceph_fs_client *
ceph_sb_to_client(const struct super_block *sb)
{
        return (struct ceph_fs_client *)sb->s_fs_info;
}

static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
        return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
}

static inline struct ceph_vino
ceph_vino(const struct inode *inode)
{
        return ceph_inode(inode)->i_vino;
}

static inline u32 ceph_ino_to_ino32(u64 vino)
{
        u32 ino = vino & 0xffffffff;
        ino ^= vino >> 32;
        if (!ino)
                ino = 2;
        return ino;
}

/*
 * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on
 * some arches. We generally do not use this value inside the ceph driver, but
 * we do want to set it to something, so that generic vfs code has an
 * appropriate value for tracepoints and the like.
 */
static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino)
{
        if (sizeof(ino_t) == sizeof(u32))
                return ceph_ino_to_ino32(vino.ino);
        return (ino_t)vino.ino;
}

/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap

static inline u64 ceph_ino(struct inode *inode)
{
        return ceph_inode(inode)->i_vino.ino;
}

static inline u64 ceph_snap(struct inode *inode)
{
        return ceph_inode(inode)->i_vino.snap;
}

/**
 * ceph_present_ino - format an inode number for presentation to userland
 * @sb: superblock where the inode lives
 * @ino: inode number to (possibly) convert
 *
 * If the user mounted with the ino32 option, then the 64-bit value needs
 * to be converted to something that can fit inside 32 bits. Note that
 * internal kernel code never uses this value, so this is entirely for
 * userland consumption.
 */
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
        if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
                return ceph_ino_to_ino32(ino);
        return ino;
}

static inline u64 ceph_present_inode(struct inode *inode)
{
        return ceph_present_ino(inode->i_sb, ceph_ino(inode));
}

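/*
 * Worked example (illustrative): ceph_ino_to_ino32() folds a 64-bit inode
 * number by XOR'ing its high and low 32 bits, so 0x0000000100000005 maps to
 * 0x00000005 ^ 0x00000001 = 0x00000004; a result of 0 is remapped to 2,
 * since 0 is not a usable inode number.  Userland only sees the folded value
 * via ceph_present_ino() when the ino32 mount option is set.
 */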
static inline int ceph_ino_compare(struct inode *inode, void *data)
{
        struct ceph_vino *pvino = (struct ceph_vino *)data;
        struct ceph_inode_info *ci = ceph_inode(inode);
        return ci->i_vino.ino == pvino->ino &&
                ci->i_vino.snap == pvino->snap;
}

/*
 * The MDS reserves a set of inodes for its own usage. These should never
 * be accessible by clients, and so the MDS has no reason to ever hand these
 * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
 *
 * These come from src/mds/mdstypes.h in the ceph sources.
 */
#define CEPH_MAX_MDS                    0x100
#define CEPH_NUM_STRAY                  10
#define CEPH_MDS_INO_MDSDIR_OFFSET      (1 * CEPH_MAX_MDS)
#define CEPH_MDS_INO_LOG_OFFSET         (2 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE            ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))

static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
        if (vino.ino >= CEPH_INO_SYSTEM_BASE ||
            vino.ino < CEPH_MDS_INO_MDSDIR_OFFSET)
                return false;

        /* Don't warn on mdsdirs */
        WARN_RATELIMIT(vino.ino >= CEPH_MDS_INO_LOG_OFFSET,
                       "Attempt to access reserved inode number 0x%llx",
                       vino.ino);
        return true;
}

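/*
 * Worked example (illustrative): with CEPH_MAX_MDS = 0x100 and
 * CEPH_NUM_STRAY = 10, the reserved range tested above is
 * [CEPH_MDS_INO_MDSDIR_OFFSET, CEPH_INO_SYSTEM_BASE) =
 * [0x100, 6*0x100 + 0x100*10) = [0x100, 0x1000), and only inos at or above
 * CEPH_MDS_INO_LOG_OFFSET (0x200) trigger the rate-limited warning.
 */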
static inline struct inode *ceph_find_inode(struct super_block *sb,
                                            struct ceph_vino vino)
{
        if (ceph_vino_is_reserved(vino))
                return NULL;

        /*
         * NB: The hashval will be run through the fs/inode.c hash function
         * anyway, so there is no need to squash the inode number down to
         * 32-bits first. Just use low-order bits on arches with 32-bit long.
         */
        return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino);
}


/*
 * Ceph inode flags (i_ceph_flags).
 */
#define CEPH_I_DIR_ORDERED      (1 << 0)  /* dentries in dir are ordered */
#define CEPH_I_FLUSH            (1 << 2)  /* do not delay flush of dirty metadata */
#define CEPH_I_POOL_PERM        (1 << 3)  /* pool rd/wr bits are valid */
#define CEPH_I_POOL_RD          (1 << 4)  /* can read from pool */
#define CEPH_I_POOL_WR          (1 << 5)  /* can write to pool */
#define CEPH_I_SEC_INITED       (1 << 6)  /* security initialized */
#define CEPH_I_KICK_FLUSH       (1 << 7)  /* kick flushing caps */
#define CEPH_I_FLUSH_SNAPS      (1 << 8)  /* need flush snaps */
#define CEPH_I_ERROR_WRITE      (1 << 9)  /* have seen write errors */
#define CEPH_I_ERROR_FILELOCK   (1 << 10) /* have seen file lock errors */
#define CEPH_I_ODIRECT          (1 << 11) /* inode in direct I/O mode */
#define CEPH_ASYNC_CREATE_BIT   (12)      /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE     (1 << CEPH_ASYNC_CREATE_BIT)
#define CEPH_I_SHUTDOWN         (1 << 13) /* inode is no longer usable */

/*
 * Masks of ceph inode work.
 */
#define CEPH_I_WORK_WRITEBACK           0
#define CEPH_I_WORK_INVALIDATE_PAGES    1
#define CEPH_I_WORK_VMTRUNCATE          2
#define CEPH_I_WORK_CHECK_CAPS          3
#define CEPH_I_WORK_FLUSH_SNAPS         4

/*
 * We set the ERROR_WRITE bit when we start seeing write errors on an inode
 * and then clear it when they start succeeding. Note that we do a lockless
 * check first, and only take the lock if it looks like it needs to be changed.
 * The write submission code just takes this as a hint, so we're not too
 * worried if a few slip through in either direction.
 */
static inline void ceph_set_error_write(struct ceph_inode_info *ci)
{
        if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE)) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags |= CEPH_I_ERROR_WRITE;
                spin_unlock(&ci->i_ceph_lock);
        }
}

static inline void ceph_clear_error_write(struct ceph_inode_info *ci)
{
        if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_WRITE;
                spin_unlock(&ci->i_ceph_lock);
        }
}

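/*
 * Illustrative sketch (an assumption, not copied from the kernel sources)
 * of how a write completion path might feed the hint described above:
 *
 *        if (rc < 0)
 *                ceph_set_error_write(ci);
 *        else
 *                ceph_clear_error_write(ci);
 */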
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
                                           long long release_count,
                                           long long ordered_count)
{
        /*
         * Makes sure operations that setup readdir cache (update page
         * cache and i_size) are strongly ordered w.r.t. the following
         * atomic64_set() operations.
         */
        smp_mb();
        atomic64_set(&ci->i_complete_seq[0], release_count);
        atomic64_set(&ci->i_complete_seq[1], ordered_count);
}

static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
{
        atomic64_inc(&ci->i_release_count);
}

static inline void __ceph_dir_clear_ordered(struct ceph_inode_info *ci)
{
        atomic64_inc(&ci->i_ordered_count);
}

static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
{
        return atomic64_read(&ci->i_complete_seq[0]) ==
                atomic64_read(&ci->i_release_count);
}

static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
{
        return atomic64_read(&ci->i_complete_seq[0]) ==
                atomic64_read(&ci->i_release_count) &&
                atomic64_read(&ci->i_complete_seq[1]) ==
                atomic64_read(&ci->i_ordered_count);
}

static inline void ceph_dir_clear_complete(struct inode *inode)
{
        __ceph_dir_clear_complete(ceph_inode(inode));
}

static inline void ceph_dir_clear_ordered(struct inode *inode)
{
        __ceph_dir_clear_ordered(ceph_inode(inode));
}

static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
{
        bool ret = __ceph_dir_is_complete_ordered(ceph_inode(inode));
        smp_rmb();
        return ret;
}

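/*
 * Illustrative sketch (an assumption, not copied from the kernel sources):
 * a readdir path samples the counters before populating the dcache and
 * passes the sampled values to __ceph_dir_set_complete(), so the
 * completeness tests above only succeed if nothing changed in the meantime:
 *
 *        long long rc = atomic64_read(&ci->i_release_count);
 *        long long oc = atomic64_read(&ci->i_ordered_count);
 *        ... populate the dcache from the MDS reply ...
 *        __ceph_dir_set_complete(ci, rc, oc);
 */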
/* find a specific frag @f */
extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
                                                u32 f);

/*
 * choose fragment for value @v.  copy frag content to pfrag, if leaf
 * exists
 */
extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                            struct ceph_inode_frag *pfrag,
                            int *found);

static inline struct ceph_dentry_info *ceph_dentry(const struct dentry *dentry)
{
        return (struct ceph_dentry_info *)dentry->d_fsdata;
}

/*
 * caps helpers
 */
static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
{
        return !RB_EMPTY_ROOT(&ci->i_caps);
}

extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
extern int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
                                          int t);
extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
                                    struct ceph_cap *cap);

static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
        int issued;
        spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
        spin_unlock(&ci->i_ceph_lock);
        return issued;
}

static inline int ceph_caps_issued_mask_metric(struct ceph_inode_info *ci,
                                               int mask, int touch)
{
        int r;
        spin_lock(&ci->i_ceph_lock);
        r = __ceph_caps_issued_mask_metric(ci, mask, touch);
        spin_unlock(&ci->i_ceph_lock);
        return r;
}

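/*
 * Illustrative sketch (an assumption, not copied from the kernel sources):
 * callers pass a mask of CEPH_CAP_* bits (from linux/ceph/ceph_fs.h); a
 * non-zero return means every requested cap is currently issued, e.g.:
 *
 *        if (ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))
 *                ... the cached xattr blob may be trusted ...
 */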
static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
        return ci->i_dirty_caps | ci->i_flushing_caps;
}
extern struct ceph_cap_flush *ceph_alloc_cap_flush(void);
extern void ceph_free_cap_flush(struct ceph_cap_flush *cf);
extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                                  struct ceph_cap_flush **pcf);

extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
                                      struct ceph_cap *ocap, int mask);
extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);

static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)
{
        return ci->i_nr_by_mode[0];
}
extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci);
extern int __ceph_caps_wanted(struct ceph_inode_info *ci);

/* what the mds thinks we want */
extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);

extern void ceph_caps_init(struct ceph_mds_client *mdsc);
extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
extern void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
                                     struct ceph_mount_options *fsopt);
extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
                             struct ceph_cap_reservation *ctx, int need);
extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                                struct ceph_cap_reservation *ctx);
extern void ceph_reservation_status(struct ceph_fs_client *client,
                                    int *total, int *avail, int *used,
                                    int *reserved, int *min);



/*
 * we keep buffered readdir results attached to file->private_data
 */
#define CEPH_F_SYNC     1
#define CEPH_F_ATEND    2

struct ceph_file_info {
        short fmode;     /* initialized on open */
        short flags;     /* CEPH_F_* */

        spinlock_t rw_contexts_lock;
        struct list_head rw_contexts;

        u32 filp_gen;
        atomic_t num_locks;
};

struct ceph_dir_file_info {
        struct ceph_file_info file_info;

        /* readdir: position within the dir */
        u32 frag;
        struct ceph_mds_request *last_readdir;

        /* readdir: position within a frag */
        unsigned next_offset;  /* offset of next chunk (last_name's + 1) */
        char *last_name;       /* last entry in previous chunk */
        long long dir_release_count;
        long long dir_ordered_count;
        int readdir_cache_idx;

        /* used for -o dirstat read() on directory thing */
        char *dir_info;
        int dir_info_len;
};

struct ceph_rw_context {
        struct list_head list;
        struct task_struct *thread;
        int caps;
};

#define CEPH_DEFINE_RW_CONTEXT(_name, _caps)    \
        struct ceph_rw_context _name = {        \
                .thread = current,              \
                .caps = _caps,                  \
        }

static inline void ceph_add_rw_context(struct ceph_file_info *cf,
                                       struct ceph_rw_context *ctx)
{
        spin_lock(&cf->rw_contexts_lock);
        list_add(&ctx->list, &cf->rw_contexts);
        spin_unlock(&cf->rw_contexts_lock);
}

static inline void ceph_del_rw_context(struct ceph_file_info *cf,
                                       struct ceph_rw_context *ctx)
{
        spin_lock(&cf->rw_contexts_lock);
        list_del(&ctx->list);
        spin_unlock(&cf->rw_contexts_lock);
}

static inline struct ceph_rw_context*
ceph_find_rw_context(struct ceph_file_info *cf)
{
        struct ceph_rw_context *ctx, *found = NULL;
        spin_lock(&cf->rw_contexts_lock);
        list_for_each_entry(ctx, &cf->rw_contexts, list) {
                if (ctx->thread == current) {
                        found = ctx;
                        break;
                }
        }
        spin_unlock(&cf->rw_contexts_lock);
        return found;
}

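/*
 * Usage sketch (illustrative, not copied from the kernel sources): a
 * read/write path can record the caps it holds for the duration of an
 * operation so that nested code (e.g. a page fault) can look them up;
 * "cf" is the file's ceph_file_info and "got" is assumed to hold the
 * caps obtained from ceph_get_caps().
 *
 *        CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
 *        ceph_add_rw_context(cf, &rw_ctx);
 *        ... do the I/O ...
 *        ceph_del_rw_context(cf, &rw_ctx);
 */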
struct ceph_readdir_cache_control {
        struct page  *page;
        struct dentry **dentries;
        int index;
};

/*
 * A "snap realm" describes a subset of the file hierarchy sharing
 * the same set of snapshots that apply to it.  The realms themselves
 * are organized into a hierarchy, such that children inherit (some of)
 * the snapshots of their parents.
 *
 * All inodes within the realm that have capabilities are linked into a
 * per-realm list.
 */
struct ceph_snap_realm {
        u64 ino;
        struct inode *inode;
        atomic_t nref;
        struct rb_node node;

        u64 created, seq;
        u64 parent_ino;
        u64 parent_since;   /* snapid when our current parent became so */

        u64 *prior_parent_snaps;      /* snaps inherited from any parents we */
        u32 num_prior_parent_snaps;   /*  had prior to parent_since */
        u64 *snaps;                   /* snaps specific to this realm */
        u32 num_snaps;

        struct ceph_snap_realm *parent;
        struct list_head children;       /* list of child realms */
        struct list_head child_item;

        struct list_head empty_item;     /* if i have ref==0 */

        struct list_head dirty_item;     /* if realm needs new context */

        /* the current set of snaps for this realm */
        struct ceph_snap_context *cached_context;

        struct list_head inodes_with_caps;
        spinlock_t inodes_with_caps_lock;
};

static inline int default_congestion_kb(void)
{
        int congestion_kb;

        /*
         * Copied from NFS
         *
         * congestion size, scale with available memory.
         *
         *  64MB:    8192k
         * 128MB:   11585k
         * 256MB:   16384k
         * 512MB:   23170k
         *   1GB:   32768k
         *   2GB:   46340k
         *   4GB:   65536k
         *   8GB:   92681k
         *  16GB:  131072k
         *
         * This allows larger machines to have larger/more transfers.
         * Limit the default to 256M
         */
        congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
        if (congestion_kb > 256*1024)
                congestion_kb = 256*1024;

        return congestion_kb;
}

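/*
 * Worked example (illustrative): on a machine with 1 GB of RAM and 4 KB
 * pages, totalram_pages() is roughly 262144, int_sqrt() of that is 512,
 * and (16 * 512) << (PAGE_SHIFT - 10) = 8192 << 2 = 32768k, matching the
 * 1GB row in the table above.
 */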
925
Yan, Zhengd468e722019-07-25 20:16:44 +0800926/* super.c */
927extern int ceph_force_reconnect(struct super_block *sb);
Sage Weilde576062009-10-06 11:31:07 -0700928/* snap.c */
929struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
930 u64 ino);
931extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
932 struct ceph_snap_realm *realm);
933extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
934 struct ceph_snap_realm *realm);
935extern int ceph_update_snap_trace(struct ceph_mds_client *m,
Yan, Zheng982d6012014-12-23 15:30:54 +0800936 void *p, void *e, bool deletion,
937 struct ceph_snap_realm **realm_ret);
Jeff Layton0ba92e12021-08-02 11:01:26 -0400938void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm);
Sage Weilde576062009-10-06 11:31:07 -0700939extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
Sage Weil2600d2d2010-02-22 15:12:16 -0800940 struct ceph_mds_session *session,
Sage Weilde576062009-10-06 11:31:07 -0700941 struct ceph_msg *msg);
Sage Weilde576062009-10-06 11:31:07 -0700942extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
943 struct ceph_cap_snap *capsnap);
944extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
945
Yan, Zheng75c96272017-12-14 15:11:09 +0800946extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc,
947 u64 snap);
948extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
949 struct ceph_snapid_map *sm);
950extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
951extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
Jeff Layton631ed4b2021-10-14 11:10:47 -0400952void ceph_umount_begin(struct super_block *sb);
Yan, Zheng75c96272017-12-14 15:11:09 +0800953
954
Sage Weilde576062009-10-06 11:31:07 -0700955/*
956 * a cap_snap is "pending" if it is still awaiting an in-progress
957 * sync write (that may/may not still update size, mtime, etc.).
958 */
959static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
960{
961 return !list_empty(&ci->i_cap_snaps) &&
Yan, Zheng86056092015-05-01 16:57:16 +0800962 list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap,
963 ci_item)->writing;
Sage Weilde576062009-10-06 11:31:07 -0700964}
965
Sage Weilde576062009-10-06 11:31:07 -0700966/* inode.c */
Jeff Layton966c7162019-12-05 09:09:25 -0500967struct ceph_mds_reply_info_in;
968struct ceph_mds_reply_dirfrag;
969
Sage Weilde576062009-10-06 11:31:07 -0700970extern const struct inode_operations ceph_file_iops;
971
972extern struct inode *ceph_alloc_inode(struct super_block *sb);
Yan, Zheng87bc5b82019-06-02 09:45:38 +0800973extern void ceph_evict_inode(struct inode *inode);
Al Virocfa6d412019-04-10 15:18:50 -0400974extern void ceph_free_inode(struct inode *inode);
Sage Weilde576062009-10-06 11:31:07 -0700975
976extern struct inode *ceph_get_inode(struct super_block *sb,
977 struct ceph_vino vino);
978extern struct inode *ceph_get_snapdir(struct inode *parent);
979extern int ceph_fill_file_size(struct inode *inode, int issued,
980 u32 truncate_seq, u64 truncate_size, u64 size);
981extern void ceph_fill_file_time(struct inode *inode, int issued,
Arnd Bergmann9bbeab42018-07-13 22:18:36 +0200982 u64 time_warp_seq, struct timespec64 *ctime,
983 struct timespec64 *mtime,
984 struct timespec64 *atime);
Jeff Layton966c7162019-12-05 09:09:25 -0500985extern int ceph_fill_inode(struct inode *inode, struct page *locked_page,
986 struct ceph_mds_reply_info_in *iinfo,
987 struct ceph_mds_reply_dirfrag *dirinfo,
988 struct ceph_mds_session *session, int cap_fmode,
989 struct ceph_cap_reservation *caps_reservation);
Sage Weilde576062009-10-06 11:31:07 -0700990extern int ceph_fill_trace(struct super_block *sb,
Jeff Laytonf5a03b02017-01-31 11:06:13 -0500991 struct ceph_mds_request *req);
Sage Weilde576062009-10-06 11:31:07 -0700992extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
993 struct ceph_mds_session *session);
994
995extern int ceph_inode_holds_cap(struct inode *inode, int mask);
996
Yan, Zhengefb0ca72017-05-22 12:03:32 +0800997extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
Yan, Zhengb415bf42013-07-02 12:40:19 +0800998extern void __ceph_do_pending_vmtruncate(struct inode *inode);
Jeff Layton64f28c62020-10-09 14:24:34 -0400999
Jeff Layton64f28c62020-10-09 14:24:34 -04001000void ceph_queue_inode_work(struct inode *inode, int work_bit);
1001
1002static inline void ceph_queue_vmtruncate(struct inode *inode)
1003{
1004 ceph_queue_inode_work(inode, CEPH_I_WORK_VMTRUNCATE);
1005}
1006
1007static inline void ceph_queue_invalidate(struct inode *inode)
1008{
1009 ceph_queue_inode_work(inode, CEPH_I_WORK_INVALIDATE_PAGES);
1010}
1011
1012static inline void ceph_queue_writeback(struct inode *inode)
1013{
1014 ceph_queue_inode_work(inode, CEPH_I_WORK_WRITEBACK);
1015}
1016
Jeff Laytona8810cd2020-12-10 14:39:26 -05001017static inline void ceph_queue_check_caps(struct inode *inode)
1018{
1019 ceph_queue_inode_work(inode, CEPH_I_WORK_CHECK_CAPS);
1020}
1021
1022static inline void ceph_queue_flush_snaps(struct inode *inode)
1023{
1024 ceph_queue_inode_work(inode, CEPH_I_WORK_FLUSH_SNAPS);
1025}
1026
Yan, Zheng01deead2014-11-14 21:56:29 +08001027extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
1028 int mask, bool force);
1029static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
1030{
1031 return __ceph_do_getattr(inode, NULL, mask, force);
1032}
Christian Brauner549c7292021-01-21 14:19:43 +01001033extern int ceph_permission(struct user_namespace *mnt_userns,
1034 struct inode *inode, int mask);
Andreas Gruenbachera26fecc2016-04-14 00:30:16 +02001035extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
Christian Brauner549c7292021-01-21 14:19:43 +01001036extern int ceph_setattr(struct user_namespace *mnt_userns,
1037 struct dentry *dentry, struct iattr *attr);
1038extern int ceph_getattr(struct user_namespace *mnt_userns,
1039 const struct path *path, struct kstat *stat,
David Howellsa528d352017-01-31 16:46:22 +00001040 u32 request_mask, unsigned int flags);
Jeff Layton5d6451b2021-08-31 13:39:13 -04001041void ceph_inode_shutdown(struct inode *inode);
1042
1043static inline bool ceph_inode_is_shutdown(struct inode *inode)
1044{
1045 unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
1046 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1047 int state = READ_ONCE(fsc->mount_state);
1048
1049 return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
1050}
Sage Weilde576062009-10-06 11:31:07 -07001051
1052/* xattr.c */
Andreas Gruenbachera26fecc2016-04-14 00:30:16 +02001053int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001054ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
Sage Weilde576062009-10-06 11:31:07 -07001055extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
Luis Henriques12fe3dd2019-07-19 15:32:21 +01001056extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
Sage Weilde576062009-10-06 11:31:07 -07001057extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
Yan, Zhengb1ee94a2014-09-16 20:35:17 +08001058extern const struct xattr_handler *ceph_xattr_handlers[];
Sage Weilde576062009-10-06 11:31:07 -07001059
Yan, Zheng5c31e922019-05-26 15:35:39 +08001060struct ceph_acl_sec_ctx {
1061#ifdef CONFIG_CEPH_FS_POSIX_ACL
1062 void *default_acl;
1063 void *acl;
1064#endif
Yan, Zhengac6713c2019-05-26 16:27:56 +08001065#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
1066 void *sec_ctx;
1067 u32 sec_ctxlen;
1068#endif
Yan, Zheng5c31e922019-05-26 15:35:39 +08001069 struct ceph_pagelist *pagelist;
1070};
1071
Yan, Zheng315f2402016-03-07 10:34:50 +08001072#ifdef CONFIG_SECURITY
1073extern bool ceph_security_xattr_deadlock(struct inode *in);
1074extern bool ceph_security_xattr_wanted(struct inode *in);
1075#else
1076static inline bool ceph_security_xattr_deadlock(struct inode *in)
1077{
1078 return false;
1079}
1080static inline bool ceph_security_xattr_wanted(struct inode *in)
1081{
1082 return false;
1083}
1084#endif
1085
Yan, Zhengac6713c2019-05-26 16:27:56 +08001086#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
1087extern int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
1088 struct ceph_acl_sec_ctx *ctx);
Jeff Layton668959a2019-08-06 09:07:51 -04001089static inline void ceph_security_invalidate_secctx(struct inode *inode)
1090{
1091 security_inode_invalidate_secctx(inode);
1092}
Yan, Zhengac6713c2019-05-26 16:27:56 +08001093#else
1094static inline int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
1095 struct ceph_acl_sec_ctx *ctx)
1096{
1097 return 0;
1098}
1099static inline void ceph_security_invalidate_secctx(struct inode *inode)
1100{
1101}
1102#endif
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001103
Yan, Zheng5c31e922019-05-26 15:35:39 +08001104void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx);
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001105
Yan, Zheng5c31e922019-05-26 15:35:39 +08001106/* acl.c */
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001107#ifdef CONFIG_CEPH_FS_POSIX_ACL
1108
Miklos Szeredi0cad6242021-08-18 22:08:24 +02001109struct posix_acl *ceph_get_acl(struct inode *, int, bool);
Christian Brauner549c7292021-01-21 14:19:43 +01001110int ceph_set_acl(struct user_namespace *mnt_userns,
1111 struct inode *inode, struct posix_acl *acl, int type);
Yan, Zhengb1ee94a2014-09-16 20:35:17 +08001112int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
Yan, Zheng5c31e922019-05-26 15:35:39 +08001113 struct ceph_acl_sec_ctx *as_ctx);
1114void ceph_init_inode_acls(struct inode *inode,
1115 struct ceph_acl_sec_ctx *as_ctx);
Guangliang Zhaoc969d9b2014-02-16 08:35:52 -08001116
1117static inline void ceph_forget_all_cached_acls(struct inode *inode)
1118{
1119 forget_all_cached_acls(inode);
1120}
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001121
1122#else
1123
1124#define ceph_get_acl NULL
Sage Weil72466d02014-01-29 06:22:25 -08001125#define ceph_set_acl NULL
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001126
Yan, Zhengb1ee94a2014-09-16 20:35:17 +08001127static inline int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
Yan, Zheng5c31e922019-05-26 15:35:39 +08001128 struct ceph_acl_sec_ctx *as_ctx)
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001129{
1130 return 0;
1131}
Yan, Zhengb1ee94a2014-09-16 20:35:17 +08001132static inline void ceph_init_inode_acls(struct inode *inode,
Yan, Zheng5c31e922019-05-26 15:35:39 +08001133 struct ceph_acl_sec_ctx *as_ctx)
Yan, Zhengb1ee94a2014-09-16 20:35:17 +08001134{
1135}
Guangliang Zhao7221fe42013-11-11 15:18:03 +08001136static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
1137{
1138 return 0;
1139}
1140
1141static inline void ceph_forget_all_cached_acls(struct inode *inode)
1142{
1143}
1144
1145#endif

/* caps.c */
extern const char *ceph_cap_string(int c);
extern void ceph_handle_caps(struct ceph_mds_session *session,
			     struct ceph_msg *msg);
extern struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
				     struct ceph_cap_reservation *ctx);
extern void ceph_add_cap(struct inode *inode,
			 struct ceph_mds_session *session, u64 cap_id,
			 unsigned issued, unsigned wanted,
			 unsigned cap, unsigned seq, u64 realmino, int flags,
			 struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void __ceph_remove_caps(struct ceph_inode_info *ci);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
			 struct ceph_cap *cap);
extern int ceph_is_any_caps(struct inode *inode);

extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync);
extern void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session);
extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
				    struct ceph_mds_session *session);
void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
				   struct ceph_inode_info *ci);
extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
					     int mds);
extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
			       bool snap_rwsem_locked);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had);
extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
					    int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				       struct ceph_snap_context *snapc);
extern void __ceph_remove_capsnap(struct inode *inode,
				  struct ceph_cap_snap *capsnap,
				  bool *wake_ci, bool *wake_mdsc);
extern void ceph_remove_capsnap(struct inode *inode,
				struct ceph_cap_snap *capsnap,
				bool *wake_ci, bool *wake_mdsc);
extern void ceph_flush_snaps(struct ceph_inode_info *ci,
			     struct ceph_mds_session **psession);
extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
			    struct ceph_mds_session *session);
extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
extern int ceph_drop_caps_for_unlink(struct inode *inode);
extern int ceph_encode_inode_release(void **p, struct inode *inode,
				     int mds, int drop, int unless, int force);
extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
				      struct inode *dir,
				      int mds, int drop, int unless);

extern int ceph_get_caps(struct file *filp, int need, int want,
			 loff_t endoff, int *got);
extern int ceph_try_get_caps(struct inode *inode,
			     int need, int want, bool nonblock, int *got);
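
/*
 * Illustrative call pattern (a sketch, not copied verbatim from fs/ceph):
 * I/O paths take cap references up front and drop them once the I/O is
 * done, roughly:
 *
 *	int got = 0;
 *	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD,
 *			    CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
 *			    -1, &got);
 *	if (ret < 0)
 *		return ret;
 *	... perform the read using whatever caps were granted in 'got' ...
 *	ceph_put_cap_refs(ceph_inode(file_inode(filp)), got);
 */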

/* for counting open files by mode */
extern void ceph_get_fmode(struct ceph_inode_info *ci, int mode, int count);
extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode, int count);
extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
			       struct ceph_mds_client *mdsc, int fmode);

/* addr.c */
extern const struct address_space_operations ceph_aops;
extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc);
int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate);

/* file.c */
extern const struct file_operations ceph_file_fops;

extern int ceph_renew_caps(struct inode *inode, int fmode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned flags, umode_t mode);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
				  char *data, size_t len);

/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct file_operations ceph_snapdir_fops;
extern const struct inode_operations ceph_dir_iops;
extern const struct inode_operations ceph_snapdir_iops;
extern const struct dentry_operations ceph_dentry_ops;

extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
					  struct dentry *dentry);
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
					 struct dentry *dentry, int err);

extern void __ceph_dentry_lease_touch(struct ceph_dentry_info *di);
extern void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di);
extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
extern int ceph_trim_dentries(struct ceph_mds_client *mdsc);
extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);

/* ioctl.c */
extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

/* export.c */
extern const struct export_operations ceph_export_ops;
struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino);

/* locks.c */
extern __init void ceph_flock_init(void);
extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
extern int ceph_encode_locks_to_buffer(struct inode *inode,
				       struct ceph_filelock *flocks,
				       int num_fcntl_locks,
				       int num_flock_locks);
extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
				  struct ceph_pagelist *pagelist,
				  int num_fcntl_locks, int num_flock_locks);

/* debugfs.c */
extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);

/* quota.c */

/* true if either a byte quota or a file-count quota is set on this inode */
static inline bool __ceph_has_any_quota(struct ceph_inode_info *ci)
{
	return ci->i_max_files || ci->i_max_bytes;
}

extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);

/*
 * Record the new quota limits and, if the inode gained or lost its quota in
 * the process, adjust the count of realms that carry one.
 */
static inline void __ceph_update_quota(struct ceph_inode_info *ci,
				       u64 max_bytes, u64 max_files)
{
	bool had_quota, has_quota;

	had_quota = __ceph_has_any_quota(ci);
	ci->i_max_bytes = max_bytes;
	ci->i_max_files = max_files;
	has_quota = __ceph_has_any_quota(ci);

	if (had_quota != has_quota)
		ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
}

extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
			      struct ceph_mds_session *session,
			      struct ceph_msg *msg);
extern bool ceph_quota_is_max_files_exceeded(struct inode *inode);
extern bool ceph_quota_is_same_realm(struct inode *old, struct inode *new);
extern bool ceph_quota_is_max_bytes_exceeded(struct inode *inode,
					     loff_t newlen);
extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
						loff_t newlen);
extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
				     struct kstatfs *buf);
extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
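
/*
 * Illustrative write-path usage (a sketch under the declarations above, not
 * lifted verbatim from fs/ceph/file.c): a write first refuses to extend the
 * file past the byte quota, and after writing nudges the MDS when the limit
 * is getting close so the new size is reported promptly:
 *
 *	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count))
 *		return -EDQUOT;
 *	... write the data ...
 *	if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
 *		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
 */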

#endif /* _FS_CEPH_SUPER_H */