Dave Chinner | 0b61f8a | 2018-06-05 19:42:14 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
Nathan Scott | 7b71876 | 2005-11-02 14:58:39 +1100 | [diff] [blame] | 3 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
| 4 | * All Rights Reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | */ |
| 6 | #ifndef __XFS_MOUNT_H__ |
| 7 | #define __XFS_MOUNT_H__ |
| 8 | |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 9 | struct xlog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | struct xfs_inode; |
David Chinner | 2a82b8b | 2007-07-11 11:09:12 +1000 | [diff] [blame] | 11 | struct xfs_mru_cache; |
David Chinner | 82fa901 | 2008-10-30 17:38:26 +1100 | [diff] [blame] | 12 | struct xfs_ail; |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 13 | struct xfs_quotainfo; |
Dave Chinner | 0650b55 | 2014-06-06 15:01:58 +1000 | [diff] [blame] | 14 | struct xfs_da_geometry; |
Dave Chinner | 07b6403 | 2021-06-02 10:48:24 +1000 | [diff] [blame] | 15 | struct xfs_perag; |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 16 | |
/*
 * Dynamic preallocation free space thresholds, 5% down to 1%.
 * These are indexes into the m_low_space[] and m_low_rtexts[] arrays in
 * struct xfs_mount (one precomputed block/extent count per threshold).
 */
enum {
	XFS_LOWSP_1_PCNT = 0,	/* 1% free space remaining */
	XFS_LOWSP_2_PCNT,	/* 2% free space remaining */
	XFS_LOWSP_3_PCNT,	/* 3% free space remaining */
	XFS_LOWSP_4_PCNT,	/* 4% free space remaining */
	XFS_LOWSP_5_PCNT,	/* 5% free space remaining */
	XFS_LOWSP_MAX,		/* number of thresholds, array size */
};
| 26 | |
/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 *
 * Both enums index into the m_error_cfg[class][errno] table in
 * struct xfs_mount, so the *_MAX values size that table and the
 * ordering here must not change.
 */
enum {
	XFS_ERR_METADATA,	/* metadata IO error class */
	XFS_ERR_CLASS_MAX,	/* number of classes, table dimension */
};
enum {
	XFS_ERR_DEFAULT,	/* fallback config for unlisted errnos */
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,	/* number of errnos, table dimension */
};
| 44 | |
Carlos Maiolino | a5ea70d | 2016-05-18 11:08:15 +1000 | [diff] [blame] | 45 | #define XFS_ERR_RETRY_FOREVER -1 |
| 46 | |
/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;		/* sysfs object exposing this config */
	int		max_retries;	/* retry limit; XFS_ERR_RETRY_FOREVER
					 * (-1) means never give up */
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
| 58 | |
/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
	struct llist_head	list;	/* lock-free list of queued inodes */
	struct work_struct	work;	/* worker that drains this CPU's list */

	/* approximate count of inodes in the list */
	unsigned int		items;
	/* times the inodegc shrinker has targeted this queue */
	unsigned int		shrinker_hits;
};
| 70 | |
/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified. We put the read-mostly variables
 * first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;	/* VFS superblock */
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	struct list_head	m_mount_list;	/* global mount list */
	void __percpu		*m_inodegc;	/* percpu inodegc structures */

	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_sync_workqueue;
	struct workqueue_struct	*m_blockgc_wq;	/* speculative prealloc gc */
	struct workqueue_struct	*m_inodegc_wq;	/* deferred inode inactivation */

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_alloc_maxlevels; /* max alloc btree levels */
	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_features;	/* active filesystem features */
	/* low free space thresholds, indexed by XFS_LOWSP_* */
	uint64_t		m_low_space[XFS_LOWSP_MAX];
	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	unsigned long		m_opstate;	/* dynamic state flags,
						 * XFS_OPSTATE_* bits */
	bool			m_always_cow;	/* COW all writes (debug knob) */
	bool			m_fail_unmount;
	bool			m_finobt_nores; /* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables. Frequently written variables and locks
	 * should be placed below this comment from now on. The first variable
	 * here is marked as cacheline aligned so that it is separated from
	 * the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;
	/*
	 * Global count of allocation btree blocks in use across all AGs. Only
	 * used when perag reservation is enabled. Helps prevent block
	 * reservation from attempting to reserve allocation btree blocks.
	 */
	atomic64_t		m_allocbt_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct xfs_kobj		m_kobj;		/* sysfs: per-mount directory */
	struct xfs_kobj		m_error_kobj;	/* sysfs: error config root */
	struct xfs_kobj		m_error_meta_kobj; /* sysfs: metadata errors */
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */

	/* Memory shrinker to throttle and reprioritize inodegc */
	struct shrinker		m_inodegc_shrinker;
	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;
| 244 | |
Darrick J. Wong | ef32595 | 2019-06-05 11:19:34 -0700 | [diff] [blame] | 245 | #define M_IGEO(mp) (&(mp)->m_ino_geo) |
| 246 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 247 | /* |
Dave Chinner | a1d86e8 | 2021-08-18 18:46:26 -0700 | [diff] [blame] | 248 | * Flags for m_features. |
| 249 | * |
| 250 | * These are all the active features in the filesystem, regardless of how |
| 251 | * they are configured. |
| 252 | */ |
| 253 | #define XFS_FEAT_ATTR (1ULL << 0) /* xattrs present in fs */ |
| 254 | #define XFS_FEAT_NLINK (1ULL << 1) /* 32 bit link counts */ |
| 255 | #define XFS_FEAT_QUOTA (1ULL << 2) /* quota active */ |
| 256 | #define XFS_FEAT_ALIGN (1ULL << 3) /* inode alignment */ |
| 257 | #define XFS_FEAT_DALIGN (1ULL << 4) /* data alignment */ |
| 258 | #define XFS_FEAT_LOGV2 (1ULL << 5) /* version 2 logs */ |
| 259 | #define XFS_FEAT_SECTOR (1ULL << 6) /* sector size > 512 bytes */ |
| 260 | #define XFS_FEAT_EXTFLG (1ULL << 7) /* unwritten extents */ |
| 261 | #define XFS_FEAT_ASCIICI (1ULL << 8) /* ASCII only case-insens. */ |
| 262 | #define XFS_FEAT_LAZYSBCOUNT (1ULL << 9) /* Superblk counters */ |
| 263 | #define XFS_FEAT_ATTR2 (1ULL << 10) /* dynamic attr fork */ |
| 264 | #define XFS_FEAT_PARENT (1ULL << 11) /* parent pointers */ |
| 265 | #define XFS_FEAT_PROJID32 (1ULL << 12) /* 32 bit project id */ |
| 266 | #define XFS_FEAT_CRC (1ULL << 13) /* metadata CRCs */ |
| 267 | #define XFS_FEAT_V3INODES (1ULL << 14) /* Version 3 inodes */ |
| 268 | #define XFS_FEAT_PQUOTINO (1ULL << 15) /* non-shared proj/grp quotas */ |
| 269 | #define XFS_FEAT_FTYPE (1ULL << 16) /* inode type in dir */ |
| 270 | #define XFS_FEAT_FINOBT (1ULL << 17) /* free inode btree */ |
| 271 | #define XFS_FEAT_RMAPBT (1ULL << 18) /* reverse map btree */ |
| 272 | #define XFS_FEAT_REFLINK (1ULL << 19) /* reflinked files */ |
| 273 | #define XFS_FEAT_SPINODES (1ULL << 20) /* sparse inode chunks */ |
| 274 | #define XFS_FEAT_META_UUID (1ULL << 21) /* metadata UUID */ |
| 275 | #define XFS_FEAT_REALTIME (1ULL << 22) /* realtime device present */ |
| 276 | #define XFS_FEAT_INOBTCNT (1ULL << 23) /* inobt block counts */ |
| 277 | #define XFS_FEAT_BIGTIME (1ULL << 24) /* large timestamps */ |
| 278 | #define XFS_FEAT_NEEDSREPAIR (1ULL << 25) /* needs xfs_repair */ |
| 279 | |
Dave Chinner | 8970a5b | 2021-08-18 18:46:51 -0700 | [diff] [blame] | 280 | /* Mount features */ |
| 281 | #define XFS_FEAT_NOATTR2 (1ULL << 48) /* disable attr2 creation */ |
| 282 | #define XFS_FEAT_NOALIGN (1ULL << 49) /* ignore alignment */ |
| 283 | #define XFS_FEAT_ALLOCSIZE (1ULL << 50) /* user specified allocation size */ |
| 284 | #define XFS_FEAT_LARGE_IOSIZE (1ULL << 51) /* report large preferred |
| 285 | * I/O size in stat() */ |
| 286 | #define XFS_FEAT_WSYNC (1ULL << 52) /* synchronous metadata ops */ |
| 287 | #define XFS_FEAT_DIRSYNC (1ULL << 53) /* synchronous directory ops */ |
| 288 | #define XFS_FEAT_DISCARD (1ULL << 54) /* discard unused blocks */ |
| 289 | #define XFS_FEAT_GRPID (1ULL << 55) /* group-ID assigned from directory */ |
| 290 | #define XFS_FEAT_SMALL_INUMS (1ULL << 56) /* user wants 32bit inodes */ |
| 291 | #define XFS_FEAT_IKEEP (1ULL << 57) /* keep empty inode clusters*/ |
| 292 | #define XFS_FEAT_SWALLOC (1ULL << 58) /* stripe width allocation */ |
| 293 | #define XFS_FEAT_FILESTREAMS (1ULL << 59) /* use filestreams allocator */ |
| 294 | #define XFS_FEAT_DAX_ALWAYS (1ULL << 60) /* DAX always enabled */ |
| 295 | #define XFS_FEAT_DAX_NEVER (1ULL << 61) /* DAX never enabled */ |
| 296 | #define XFS_FEAT_NORECOVERY (1ULL << 62) /* no recovery - dirty fs */ |
| 297 | #define XFS_FEAT_NOUUID (1ULL << 63) /* ignore uuid during mount */ |
| 298 | |
/*
 * Generate a "bool xfs_has_<name>(mp)" predicate that tests the
 * XFS_FEAT_<NAME> bit in mp->m_features.
 */
#define __XFS_HAS_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
	return mp->m_features & XFS_FEAT_ ## NAME; \
}

/*
 * Some features can be added dynamically so they need a set wrapper, too.
 * __XFS_ADD_FEAT generates both the xfs_has_<name>() predicate and an
 * xfs_add_<name>() setter that flips the bit in m_features and updates the
 * in-core superblock via xfs_sb_version_add<name>().
 */
#define __XFS_ADD_FEAT(name, NAME) \
	__XFS_HAS_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	mp->m_features |= XFS_FEAT_ ## NAME; \
	xfs_sb_version_add ## name(&mp->m_sb); \
}
| 313 | |
Dave Chinner | 8970a5b | 2021-08-18 18:46:51 -0700 | [diff] [blame] | 314 | /* Superblock features */ |
Dave Chinner | a1d86e8 | 2021-08-18 18:46:26 -0700 | [diff] [blame] | 315 | __XFS_ADD_FEAT(attr, ATTR) |
| 316 | __XFS_HAS_FEAT(nlink, NLINK) |
| 317 | __XFS_ADD_FEAT(quota, QUOTA) |
| 318 | __XFS_HAS_FEAT(align, ALIGN) |
| 319 | __XFS_HAS_FEAT(dalign, DALIGN) |
| 320 | __XFS_HAS_FEAT(logv2, LOGV2) |
| 321 | __XFS_HAS_FEAT(sector, SECTOR) |
| 322 | __XFS_HAS_FEAT(extflg, EXTFLG) |
| 323 | __XFS_HAS_FEAT(asciici, ASCIICI) |
| 324 | __XFS_HAS_FEAT(lazysbcount, LAZYSBCOUNT) |
| 325 | __XFS_ADD_FEAT(attr2, ATTR2) |
| 326 | __XFS_HAS_FEAT(parent, PARENT) |
| 327 | __XFS_ADD_FEAT(projid32, PROJID32) |
| 328 | __XFS_HAS_FEAT(crc, CRC) |
| 329 | __XFS_HAS_FEAT(v3inodes, V3INODES) |
| 330 | __XFS_HAS_FEAT(pquotino, PQUOTINO) |
| 331 | __XFS_HAS_FEAT(ftype, FTYPE) |
| 332 | __XFS_HAS_FEAT(finobt, FINOBT) |
| 333 | __XFS_HAS_FEAT(rmapbt, RMAPBT) |
| 334 | __XFS_HAS_FEAT(reflink, REFLINK) |
| 335 | __XFS_HAS_FEAT(sparseinodes, SPINODES) |
| 336 | __XFS_HAS_FEAT(metauuid, META_UUID) |
| 337 | __XFS_HAS_FEAT(realtime, REALTIME) |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 338 | __XFS_HAS_FEAT(inobtcounts, INOBTCNT) |
| 339 | __XFS_HAS_FEAT(bigtime, BIGTIME) |
| 340 | __XFS_HAS_FEAT(needsrepair, NEEDSREPAIR) |
Dave Chinner | a1d86e8 | 2021-08-18 18:46:26 -0700 | [diff] [blame] | 341 | |
| 342 | /* |
Dave Chinner | 8970a5b | 2021-08-18 18:46:51 -0700 | [diff] [blame] | 343 | * Mount features |
| 344 | * |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 345 | * These do not change dynamically - features that can come and go, such as 32 |
| 346 | * bit inodes and read-only state, are kept as operational state rather than |
Dave Chinner | 8970a5b | 2021-08-18 18:46:51 -0700 | [diff] [blame] | 347 | * features. |
| 348 | */ |
| 349 | __XFS_HAS_FEAT(noattr2, NOATTR2) |
| 350 | __XFS_HAS_FEAT(noalign, NOALIGN) |
| 351 | __XFS_HAS_FEAT(allocsize, ALLOCSIZE) |
| 352 | __XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE) |
| 353 | __XFS_HAS_FEAT(wsync, WSYNC) |
| 354 | __XFS_HAS_FEAT(dirsync, DIRSYNC) |
| 355 | __XFS_HAS_FEAT(discard, DISCARD) |
| 356 | __XFS_HAS_FEAT(grpid, GRPID) |
| 357 | __XFS_HAS_FEAT(small_inums, SMALL_INUMS) |
| 358 | __XFS_HAS_FEAT(ikeep, IKEEP) |
| 359 | __XFS_HAS_FEAT(swalloc, SWALLOC) |
| 360 | __XFS_HAS_FEAT(filestreams, FILESTREAMS) |
| 361 | __XFS_HAS_FEAT(dax_always, DAX_ALWAYS) |
| 362 | __XFS_HAS_FEAT(dax_never, DAX_NEVER) |
| 363 | __XFS_HAS_FEAT(norecovery, NORECOVERY) |
| 364 | __XFS_HAS_FEAT(nouuid, NOUUID) |
| 365 | |
| 366 | /* |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 367 | * Operational mount state flags |
| 368 | * |
| 369 | * Use these with atomic bit ops only! |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 370 | */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 371 | #define XFS_OPSTATE_UNMOUNTING 0 /* filesystem is unmounting */ |
| 372 | #define XFS_OPSTATE_CLEAN 1 /* mount was clean */ |
| 373 | #define XFS_OPSTATE_SHUTDOWN 2 /* stop all fs operations */ |
| 374 | #define XFS_OPSTATE_INODE32 3 /* inode32 allocator active */ |
| 375 | #define XFS_OPSTATE_READONLY 4 /* read-only fs */ |
Dave Chinner | cbe4dab | 2015-06-04 09:19:18 +1000 | [diff] [blame] | 376 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 377 | /* |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 378 | * If set, inactivation worker threads will be scheduled to process queued |
| 379 | * inodegc work. If not, queued inodes remain in memory waiting to be |
| 380 | * processed. |
| 381 | */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 382 | #define XFS_OPSTATE_INODEGC_ENABLED 5 |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 383 | /* |
| 384 | * If set, background speculative prealloc gc worker threads will be scheduled |
| 385 | * to process queued blockgc work. If not, inodes retain their preallocations |
| 386 | * until explicitly deleted. |
| 387 | */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 388 | #define XFS_OPSTATE_BLOCKGC_ENABLED 6 |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 389 | |
/*
 * Generate atomic accessors for the XFS_OPSTATE_<NAME> bit in mp->m_opstate:
 * xfs_is_<name>() tests the bit; xfs_set_<name>() and xfs_clear_<name>()
 * atomically flip it and return the bit's previous value (test_and_* bitop
 * semantics).
 */
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}
| 403 | |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 404 | __XFS_IS_OPSTATE(unmounting, UNMOUNTING) |
| 405 | __XFS_IS_OPSTATE(clean, CLEAN) |
| 406 | __XFS_IS_OPSTATE(shutdown, SHUTDOWN) |
| 407 | __XFS_IS_OPSTATE(inode32, INODE32) |
| 408 | __XFS_IS_OPSTATE(readonly, READONLY) |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 409 | __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED) |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 410 | __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED) |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 411 | |
| 412 | #define XFS_OPSTATE_STRINGS \ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 413 | { (1UL << XFS_OPSTATE_UNMOUNTING), "unmounting" }, \ |
| 414 | { (1UL << XFS_OPSTATE_CLEAN), "clean" }, \ |
| 415 | { (1UL << XFS_OPSTATE_SHUTDOWN), "shutdown" }, \ |
| 416 | { (1UL << XFS_OPSTATE_INODE32), "inode32" }, \ |
| 417 | { (1UL << XFS_OPSTATE_READONLY), "read_only" }, \ |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 418 | { (1UL << XFS_OPSTATE_INODEGC_ENABLED), "inodegc" }, \ |
| 419 | { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" } |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 420 | |
| 421 | /* |
Nathan Scott | 1f443ad | 2005-05-05 13:28:29 -0700 | [diff] [blame] | 422 | * Max and min values for mount-option defined I/O |
| 423 | * preallocation sizes. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 424 | */ |
Nathan Scott | 1f443ad | 2005-05-05 13:28:29 -0700 | [diff] [blame] | 425 | #define XFS_MAX_IO_LOG 30 /* 1G */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 426 | #define XFS_MIN_IO_LOG PAGE_SHIFT |
| 427 | |
Dave Chinner | 75c8c50f | 2021-08-18 18:46:53 -0700 | [diff] [blame] | 428 | #define xfs_is_shutdown(mp) xfs_is_shutdown(mp) |
Christoph Hellwig | 745f691 | 2007-08-30 17:20:39 +1000 | [diff] [blame] | 429 | void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, |
| 430 | int lnnum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 431 | #define xfs_force_shutdown(m,f) \ |
Christoph Hellwig | 745f691 | 2007-08-30 17:20:39 +1000 | [diff] [blame] | 432 | xfs_do_force_shutdown(m, f, __FILE__, __LINE__) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 433 | |
Christoph Hellwig | 2b5decd | 2008-11-28 14:23:36 +1100 | [diff] [blame] | 434 | #define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */ |
| 435 | #define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */ |
| 436 | #define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */ |
| 437 | #define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */ |
Christoph Hellwig | 2b5decd | 2008-11-28 14:23:36 +1100 | [diff] [blame] | 438 | |
Darrick J. Wong | 7f89c83 | 2021-08-10 17:00:54 -0700 | [diff] [blame] | 439 | #define XFS_SHUTDOWN_STRINGS \ |
| 440 | { SHUTDOWN_META_IO_ERROR, "metadata_io" }, \ |
| 441 | { SHUTDOWN_LOG_IO_ERROR, "log_io" }, \ |
| 442 | { SHUTDOWN_FORCE_UMOUNT, "force_umount" }, \ |
| 443 | { SHUTDOWN_CORRUPT_INCORE, "corruption" } |
| 444 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 445 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 446 | * Flags for xfs_mountfs |
| 447 | */ |
Nathan Scott | 764d1f8 | 2006-03-31 13:04:17 +1000 | [diff] [blame] | 448 | #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 449 | |
Nathan Scott | a844f45 | 2005-11-02 14:38:42 +1100 | [diff] [blame] | 450 | static inline xfs_agnumber_t |
| 451 | xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 452 | { |
Eric Sandeen | 4f1adf3 | 2017-04-19 15:19:32 -0700 | [diff] [blame] | 453 | xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d); |
Nathan Scott | a844f45 | 2005-11-02 14:38:42 +1100 | [diff] [blame] | 454 | do_div(ld, mp->m_sb.sb_agblocks); |
| 455 | return (xfs_agnumber_t) ld; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 456 | } |
| 457 | |
Nathan Scott | a844f45 | 2005-11-02 14:38:42 +1100 | [diff] [blame] | 458 | static inline xfs_agblock_t |
| 459 | xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 460 | { |
Eric Sandeen | 4f1adf3 | 2017-04-19 15:19:32 -0700 | [diff] [blame] | 461 | xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d); |
Nathan Scott | a844f45 | 2005-11-02 14:38:42 +1100 | [diff] [blame] | 462 | return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 463 | } |
| 464 | |
Dave Chinner | 07b6403 | 2021-06-02 10:48:24 +1000 | [diff] [blame] | 465 | int xfs_buf_hash_init(struct xfs_perag *pag); |
| 466 | void xfs_buf_hash_destroy(struct xfs_perag *pag); |
Lucas Stach | 6031e73 | 2016-12-07 17:36:36 +1100 | [diff] [blame] | 467 | |
Darrick J. Wong | af3b638 | 2015-11-03 13:06:34 +1100 | [diff] [blame] | 468 | extern void xfs_uuid_table_free(void); |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 469 | extern uint64_t xfs_default_resblks(xfs_mount_t *mp); |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 470 | extern int xfs_mountfs(xfs_mount_t *mp); |
Christoph Hellwig | 41b5c2e | 2008-08-13 16:49:57 +1000 | [diff] [blame] | 471 | extern void xfs_unmountfs(xfs_mount_t *); |
Dave Chinner | 964aa8d9 | 2015-02-23 21:24:37 +1100 | [diff] [blame] | 472 | |
Darrick J. Wong | 7d6f07d | 2021-08-06 11:05:40 -0700 | [diff] [blame] | 473 | /* |
| 474 | * Deltas for the block count can vary from 1 to very large, but lock contention |
| 475 | * only occurs on frequent small block count updates such as in the delayed |
| 476 | * allocation path for buffered writes (page a time updates). Hence we set |
| 477 | * a large batch count (1024) to minimise global counter updates except when |
| 478 | * we get near to ENOSPC and we have to be very accurate with our updates. |
| 479 | */ |
| 480 | #define XFS_FDBLOCKS_BATCH 1024 |
| 481 | |
Dave Chinner | 0d485ad | 2015-02-23 21:22:03 +1100 | [diff] [blame] | 482 | extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, |
| 483 | bool reserved); |
Dave Chinner | bab98bb | 2015-02-23 21:22:54 +1100 | [diff] [blame] | 484 | extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta); |
| 485 | |
Nathan Scott | 764d1f8 | 2006-03-31 13:04:17 +1000 | [diff] [blame] | 486 | extern int xfs_readsb(xfs_mount_t *, int); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 487 | extern void xfs_freesb(xfs_mount_t *); |
Brian Foster | 91ee575 | 2014-11-28 14:02:59 +1100 | [diff] [blame] | 488 | extern bool xfs_fs_writable(struct xfs_mount *mp, int level); |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 489 | extern int xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 490 | |
Christoph Hellwig | d7658d4 | 2010-02-17 19:36:13 +0000 | [diff] [blame] | 491 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); |
| 492 | |
Dave Chinner | 055388a | 2011-01-04 11:35:03 +1100 | [diff] [blame] | 493 | extern void xfs_set_low_space_thresholds(struct xfs_mount *); |
| 494 | |
Dave Chinner | 3fbbbea | 2015-11-03 12:27:22 +1100 | [diff] [blame] | 495 | int xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb, |
| 496 | xfs_off_t count_fsb); |
| 497 | |
Carlos Maiolino | df30939 | 2016-05-18 11:05:33 +1000 | [diff] [blame] | 498 | struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp, |
| 499 | int error_class, int error); |
Darrick J. Wong | f467cad | 2018-07-20 09:28:40 -0700 | [diff] [blame] | 500 | void xfs_force_summary_recalc(struct xfs_mount *mp); |
Darrick J. Wong | 908ce71 | 2021-08-08 08:27:12 -0700 | [diff] [blame] | 501 | int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature); |
| 502 | bool xfs_clear_incompat_log_features(struct xfs_mount *mp); |
Darrick J. Wong | 9fe82b8 | 2019-04-25 18:26:22 -0700 | [diff] [blame] | 503 | void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta); |
Carlos Maiolino | df30939 | 2016-05-18 11:05:33 +1000 | [diff] [blame] | 504 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 505 | #endif /* __XFS_MOUNT_H__ */ |