// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
struct xfs_perag;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};
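
/*
 * For example, the (class, errno) pairs above index the per-mount
 * m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX] array declared in
 * struct xfs_mount below, so the retry policy for metadata EIO errors
 * lives at mp->m_error_cfg[XFS_ERR_METADATA][XFS_ERR_EIO] and is looked
 * up via xfs_error_get_cfg().
 */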

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;
	int		max_retries;
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};

/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
	struct llist_head	list;
	struct work_struct	work;

	/* approximate count of inodes in the list */
	unsigned int		items;
	unsigned int		shrinker_hits;
};

/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified. We put the read-mostly variables
 * first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	struct list_head	m_mount_list;	/* global mount list */
	void __percpu		*m_inodegc;	/* percpu inodegc structures */

	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_sync_workqueue;
	struct workqueue_struct	*m_blockgc_wq;
	struct workqueue_struct	*m_inodegc_wq;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_alloc_maxlevels; /* max alloc btree levels */
	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_features;	/* active filesystem features */
	uint64_t		m_low_space[XFS_LOWSP_MAX];
	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
						/* low free space thresholds */
	unsigned long		m_opstate;	/* dynamic state flags */
	bool			m_always_cow;
	bool			m_fail_unmount;
	bool			m_finobt_nores;	/* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables. Frequently written variables and locks
	 * should be placed below this comment from now on. The first variable
	 * here is marked as cacheline aligned so that it is separated from
	 * the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;
	/*
	 * Global count of allocation btree blocks in use across all AGs. Only
	 * used when perag reservation is enabled. Helps prevent block
	 * reservation from attempting to reserve allocation btree blocks.
	 */
	atomic64_t		m_allocbt_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */

	/* Memory shrinker to throttle and reprioritize inodegc */
	struct shrinker		m_inodegc_shrinker;
	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;

#define M_IGEO(mp)		(&(mp)->m_ino_geo)

/*
 * Flags for m_features.
 *
 * These are all the active features in the filesystem, regardless of how
 * they are configured.
 */
#define XFS_FEAT_ATTR		(1ULL << 0)	/* xattrs present in fs */
#define XFS_FEAT_NLINK		(1ULL << 1)	/* 32 bit link counts */
#define XFS_FEAT_QUOTA		(1ULL << 2)	/* quota active */
#define XFS_FEAT_ALIGN		(1ULL << 3)	/* inode alignment */
#define XFS_FEAT_DALIGN		(1ULL << 4)	/* data alignment */
#define XFS_FEAT_LOGV2		(1ULL << 5)	/* version 2 logs */
#define XFS_FEAT_SECTOR		(1ULL << 6)	/* sector size > 512 bytes */
#define XFS_FEAT_EXTFLG		(1ULL << 7)	/* unwritten extents */
#define XFS_FEAT_ASCIICI	(1ULL << 8)	/* ASCII only case-insens. */
#define XFS_FEAT_LAZYSBCOUNT	(1ULL << 9)	/* Superblk counters */
#define XFS_FEAT_ATTR2		(1ULL << 10)	/* dynamic attr fork */
#define XFS_FEAT_PARENT		(1ULL << 11)	/* parent pointers */
#define XFS_FEAT_PROJID32	(1ULL << 12)	/* 32 bit project id */
#define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */
#define XFS_FEAT_V3INODES	(1ULL << 14)	/* Version 3 inodes */
#define XFS_FEAT_PQUOTINO	(1ULL << 15)	/* non-shared proj/grp quotas */
#define XFS_FEAT_FTYPE		(1ULL << 16)	/* inode type in dir */
#define XFS_FEAT_FINOBT		(1ULL << 17)	/* free inode btree */
#define XFS_FEAT_RMAPBT		(1ULL << 18)	/* reverse map btree */
#define XFS_FEAT_REFLINK	(1ULL << 19)	/* reflinked files */
#define XFS_FEAT_SPINODES	(1ULL << 20)	/* sparse inode chunks */
#define XFS_FEAT_META_UUID	(1ULL << 21)	/* metadata UUID */
#define XFS_FEAT_REALTIME	(1ULL << 22)	/* realtime device present */
#define XFS_FEAT_INOBTCNT	(1ULL << 23)	/* inobt block counts */
#define XFS_FEAT_BIGTIME	(1ULL << 24)	/* large timestamps */
#define XFS_FEAT_NEEDSREPAIR	(1ULL << 25)	/* needs xfs_repair */

/* Mount features */
#define XFS_FEAT_NOATTR2	(1ULL << 48)	/* disable attr2 creation */
#define XFS_FEAT_NOALIGN	(1ULL << 49)	/* ignore alignment */
#define XFS_FEAT_ALLOCSIZE	(1ULL << 50)	/* user specified allocation size */
#define XFS_FEAT_LARGE_IOSIZE	(1ULL << 51)	/* report large preferred
						 * I/O size in stat() */
#define XFS_FEAT_WSYNC		(1ULL << 52)	/* synchronous metadata ops */
#define XFS_FEAT_DIRSYNC	(1ULL << 53)	/* synchronous directory ops */
#define XFS_FEAT_DISCARD	(1ULL << 54)	/* discard unused blocks */
#define XFS_FEAT_GRPID		(1ULL << 55)	/* group-ID assigned from directory */
#define XFS_FEAT_SMALL_INUMS	(1ULL << 56)	/* user wants 32bit inodes */
#define XFS_FEAT_IKEEP		(1ULL << 57)	/* keep empty inode clusters */
#define XFS_FEAT_SWALLOC	(1ULL << 58)	/* stripe width allocation */
#define XFS_FEAT_FILESTREAMS	(1ULL << 59)	/* use filestreams allocator */
#define XFS_FEAT_DAX_ALWAYS	(1ULL << 60)	/* DAX always enabled */
#define XFS_FEAT_DAX_NEVER	(1ULL << 61)	/* DAX never enabled */
#define XFS_FEAT_NORECOVERY	(1ULL << 62)	/* no recovery - dirty fs */
#define XFS_FEAT_NOUUID		(1ULL << 63)	/* ignore uuid during mount */

#define __XFS_HAS_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
	return mp->m_features & XFS_FEAT_ ## NAME; \
}

/* Some features can be added dynamically so they need a set wrapper, too. */
#define __XFS_ADD_FEAT(name, NAME) \
	__XFS_HAS_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	mp->m_features |= XFS_FEAT_ ## NAME; \
	xfs_sb_version_add ## name(&mp->m_sb); \
}
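
/*
 * For reference, __XFS_ADD_FEAT(attr, ATTR) expands to roughly the following
 * pair of helpers (the __XFS_HAS_FEAT() getter plus the set wrapper):
 *
 *	static inline bool xfs_has_attr(struct xfs_mount *mp)
 *	{
 *		return mp->m_features & XFS_FEAT_ATTR;
 *	}
 *	static inline void xfs_add_attr(struct xfs_mount *mp)
 *	{
 *		mp->m_features |= XFS_FEAT_ATTR;
 *		xfs_sb_version_addattr(&mp->m_sb);
 *	}
 */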

/* Superblock features */
__XFS_ADD_FEAT(attr, ATTR)
__XFS_HAS_FEAT(nlink, NLINK)
__XFS_ADD_FEAT(quota, QUOTA)
__XFS_HAS_FEAT(align, ALIGN)
__XFS_HAS_FEAT(dalign, DALIGN)
__XFS_HAS_FEAT(logv2, LOGV2)
__XFS_HAS_FEAT(sector, SECTOR)
__XFS_HAS_FEAT(extflg, EXTFLG)
__XFS_HAS_FEAT(asciici, ASCIICI)
__XFS_HAS_FEAT(lazysbcount, LAZYSBCOUNT)
__XFS_ADD_FEAT(attr2, ATTR2)
__XFS_HAS_FEAT(parent, PARENT)
__XFS_ADD_FEAT(projid32, PROJID32)
__XFS_HAS_FEAT(crc, CRC)
__XFS_HAS_FEAT(v3inodes, V3INODES)
__XFS_HAS_FEAT(pquotino, PQUOTINO)
__XFS_HAS_FEAT(ftype, FTYPE)
__XFS_HAS_FEAT(finobt, FINOBT)
__XFS_HAS_FEAT(rmapbt, RMAPBT)
__XFS_HAS_FEAT(reflink, REFLINK)
__XFS_HAS_FEAT(sparseinodes, SPINODES)
__XFS_HAS_FEAT(metauuid, META_UUID)
__XFS_HAS_FEAT(realtime, REALTIME)
__XFS_HAS_FEAT(inobtcounts, INOBTCNT)
__XFS_HAS_FEAT(bigtime, BIGTIME)
__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)

/*
 * Mount features
 *
 * These do not change dynamically - features that can come and go, such as 32
 * bit inodes and read-only state, are kept as operational state rather than
 * features.
 */
__XFS_HAS_FEAT(noattr2, NOATTR2)
__XFS_HAS_FEAT(noalign, NOALIGN)
__XFS_HAS_FEAT(allocsize, ALLOCSIZE)
__XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
__XFS_HAS_FEAT(wsync, WSYNC)
__XFS_HAS_FEAT(dirsync, DIRSYNC)
__XFS_HAS_FEAT(discard, DISCARD)
__XFS_HAS_FEAT(grpid, GRPID)
__XFS_HAS_FEAT(small_inums, SMALL_INUMS)
__XFS_HAS_FEAT(ikeep, IKEEP)
__XFS_HAS_FEAT(swalloc, SWALLOC)
__XFS_HAS_FEAT(filestreams, FILESTREAMS)
__XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
__XFS_HAS_FEAT(dax_never, DAX_NEVER)
__XFS_HAS_FEAT(norecovery, NORECOVERY)
__XFS_HAS_FEAT(nouuid, NOUUID)

/*
 * Operational mount state flags
 *
 * Use these with atomic bit ops only!
 */
#define XFS_OPSTATE_UNMOUNTING		0	/* filesystem is unmounting */
#define XFS_OPSTATE_CLEAN		1	/* mount was clean */
#define XFS_OPSTATE_SHUTDOWN		2	/* stop all fs operations */
#define XFS_OPSTATE_INODE32		3	/* inode32 allocator active */
#define XFS_OPSTATE_READONLY		4	/* read-only fs */

/*
 * If set, inactivation worker threads will be scheduled to process queued
 * inodegc work. If not, queued inodes remain in memory waiting to be
 * processed.
 */
#define XFS_OPSTATE_INODEGC_ENABLED	5
/*
 * If set, background speculative prealloc gc worker threads will be scheduled
 * to process queued blockgc work. If not, inodes retain their preallocations
 * until explicitly deleted.
 */
#define XFS_OPSTATE_BLOCKGC_ENABLED	6

#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}
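
/*
 * For reference, __XFS_IS_OPSTATE(shutdown, SHUTDOWN) expands to roughly:
 *
 *	static inline bool xfs_is_shutdown(struct xfs_mount *mp)
 *	{
 *		return test_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate);
 *	}
 *	static inline bool xfs_clear_shutdown(struct xfs_mount *mp)
 *	{
 *		return test_and_clear_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate);
 *	}
 *	static inline bool xfs_set_shutdown(struct xfs_mount *mp)
 *	{
 *		return test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate);
 *	}
 */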

__XFS_IS_OPSTATE(unmounting, UNMOUNTING)
__XFS_IS_OPSTATE(clean, CLEAN)
__XFS_IS_OPSTATE(shutdown, SHUTDOWN)
__XFS_IS_OPSTATE(inode32, INODE32)
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)

#define XFS_OPSTATE_STRINGS \
	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
	{ (1UL << XFS_OPSTATE_SHUTDOWN),		"shutdown" }, \
	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

#define xfs_is_shutdown(mp)		xfs_is_shutdown(mp)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */

#define XFS_SHUTDOWN_STRINGS \
	{ SHUTDOWN_META_IO_ERROR,	"metadata_io" }, \
	{ SHUTDOWN_LOG_IO_ERROR,	"log_io" }, \
	{ SHUTDOWN_FORCE_UMOUNT,	"force_umount" }, \
	{ SHUTDOWN_CORRUPT_INCORE,	"corruption" }
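
/*
 * For reference, xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR) simply
 * expands to:
 *
 *	xfs_do_force_shutdown(mp, SHUTDOWN_META_IO_ERROR, __FILE__, __LINE__);
 *
 * so the caller's file and line are passed along with the shutdown reason.
 */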

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
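
/*
 * Worked example with made-up geometry: if XFS_BB_TO_FSBT() maps the daddr
 * to filesystem block 2500 and sb_agblocks is 1000, the block lives in AG 2
 * (2500 / 1000) at AG block 500 (2500 % 1000), which is what the two helpers
 * above return as the do_div() quotient and remainder respectively.
 */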

int xfs_buf_hash_init(struct xfs_perag *pag);
void xfs_buf_hash_destroy(struct xfs_perag *pag);

extern void	xfs_uuid_table_free(void);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern void	xfs_unmountfs(xfs_mount_t *);

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (a page at a time). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
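
/*
 * As a rough sketch (not the exact xfs_mod_fdblocks() implementation), a
 * batched update of the free block counter would look something like:
 *
 *	percpu_counter_add_batch(&mp->m_fdblocks, delta, XFS_FDBLOCKS_BATCH);
 *
 * which only folds a CPU's local delta into the global count once it exceeds
 * the batch size, keeping small frequent updates lock-free.
 */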
481
Dave Chinner0d485ad2015-02-23 21:22:03 +1100482extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
483 bool reserved);
Dave Chinnerbab98bb2015-02-23 21:22:54 +1100484extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
485
Nathan Scott764d1f82006-03-31 13:04:17 +1000486extern int xfs_readsb(xfs_mount_t *, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487extern void xfs_freesb(xfs_mount_t *);
Brian Foster91ee5752014-11-28 14:02:59 +1100488extern bool xfs_fs_writable(struct xfs_mount *mp, int level);
Darrick J. Wongc8ce5402017-06-16 11:00:05 -0700489extern int xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490
Christoph Hellwigd7658d42010-02-17 19:36:13 +0000491extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
492
Dave Chinner055388a2011-01-04 11:35:03 +1100493extern void xfs_set_low_space_thresholds(struct xfs_mount *);
494
Dave Chinner3fbbbea2015-11-03 12:27:22 +1100495int xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
496 xfs_off_t count_fsb);
497
Carlos Maiolinodf309392016-05-18 11:05:33 +1000498struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
499 int error_class, int error);
Darrick J. Wongf467cad2018-07-20 09:28:40 -0700500void xfs_force_summary_recalc(struct xfs_mount *mp);
Darrick J. Wong908ce712021-08-08 08:27:12 -0700501int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
502bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
Darrick J. Wong9fe82b82019-04-25 18:26:22 -0700503void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);
Carlos Maiolinodf309392016-05-18 11:05:33 +1000504
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505#endif /* __XFS_MOUNT_H__ */