// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_dir_ops;
struct xfs_da_geometry;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};
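
/*
 * The enum above indexes m_low_space[] in struct xfs_mount (declared later in
 * this header), which caches the "5% down to 1%" low free space thresholds;
 * xfs_set_low_space_thresholds(), prototyped near the end of this file, fills
 * it in.  A minimal sketch of how such thresholds could be derived, assuming
 * they are simple percentages of sb_dblocks (illustration only, not the
 * kernel's implementation):
 *
 *	static void example_set_low_space_thresholds(struct xfs_mount *mp)
 *	{
 *		int i;
 *
 *		for (i = 0; i < XFS_LOWSP_MAX; i++) {
 *			uint64_t space = mp->m_sb.sb_dblocks;
 *
 *			// XFS_LOWSP_1_PCNT -> 1% ... XFS_LOWSP_5_PCNT -> 5%
 *			do_div(space, 100);
 *			mp->m_low_space[i] = space * (i + 1);
 *		}
 *	}
 */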

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};
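
/*
 * These two enums index the two-dimensional m_error_cfg[class][errno] table
 * in struct xfs_mount below; xfs_error_get_cfg(), prototyped at the end of
 * this header, returns the matching entry.  A hedged sketch of how a raw
 * errno might be folded into one of the configurable slots (the real mapping
 * lives in xfs_mount.c and may differ in detail):
 *
 *	static struct xfs_error_cfg *
 *	example_error_get_cfg(struct xfs_mount *mp, int class, int error)
 *	{
 *		switch (error) {
 *		case EIO:
 *			return &mp->m_error_cfg[class][XFS_ERR_EIO];
 *		case ENOSPC:
 *			return &mp->m_error_cfg[class][XFS_ERR_ENOSPC];
 *		case ENODEV:
 *			return &mp->m_error_cfg[class][XFS_ERR_ENODEV];
 *		default:
 *			return &mp->m_error_cfg[class][XFS_ERR_DEFAULT];
 *		}
 *	}
 */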

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies, which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;
	int		max_retries;
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
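
/*
 * A minimal sketch of how the retry_timeout semantics above might be
 * consumed: XFS_ERR_RETRY_FOREVER means never give up, any other value is a
 * window in jiffies measured from the first failure.  The helper name and the
 * first_fail_time argument are hypothetical, for illustration only:
 *
 *	static bool
 *	example_retry_window_expired(const struct xfs_error_cfg *cfg,
 *				     unsigned long first_fail_time)
 *	{
 *		if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
 *			return false;
 *		return time_after(jiffies,
 *				  first_fail_time + cfg->retry_timeout);
 *	}
 */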

typedef struct xfs_mount {
	struct super_block	*m_super;
	xfs_tid_t		m_tid;		/* next unused tid for fs */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	struct xfs_ail		*m_ail;		/* fs active log item list */

	struct xfs_sb		m_sb;		/* copy of fs superblock */
	spinlock_t		m_sb_lock;	/* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_fsname;	/* filesystem name */
	int			m_fsname_len;	/* strlen of fs name */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	int			m_bsize;	/* fs logical block size */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_readio_log;	/* min read size log bytes */
	uint			m_readio_blocks; /* min read size blocks */
	uint			m_writeio_log;	/* min write size log bytes */
	uint			m_writeio_blocks; /* min write size blocks */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0.  Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_agino_log;	/* #bits for agino in inum */
	uint			m_inode_cluster_size;/* min inode buf size */
	unsigned int		m_inodes_per_cluster;
	unsigned int		m_blocks_per_cluster;
	unsigned int		m_cluster_align;
	unsigned int		m_cluster_align_inodes;
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_inobt_mxr[2];	/* max inobt btree records */
	uint			m_inobt_mnr[2];	/* min inobt btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
	uint			m_in_maxlevels;	/* max inobt btree levels. */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	struct mutex		m_growlock;	/* growfs mutex */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint64_t		m_flags;	/* global mount flags */
	bool			m_finobt_nores; /* no per-AG finobt resv. */
	int			m_ialloc_inos;	/* inodes in inode allocation */
	int			m_ialloc_blks;	/* blocks in inode allocation */
	int			m_ialloc_min_blks;/* min blocks in sparse inode
						   * allocation */
	int			m_inoalign_mask;/* mask sb_inoalignmt if used */
	uint			m_qflags;	/* quota status flags */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	uint64_t		m_maxicount;	/* maximum inode count */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	int			m_sinoalign;	/* stripe unit inode alignment */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	const struct xfs_nameops *m_dirnameops;	/* vector of dir name ops */
	const struct xfs_dir_ops *m_dir_inode_ops; /* vector of dir inode ops */
	const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
	uint			m_chsize;	/* size of next field */
	atomic_t		m_active_trans;	/* number trans frozen */
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct delayed_work	m_eofblocks_work; /* background eof blocks
						     trimming */
	struct delayed_work	m_cowblocks_work; /* background cow blocks
						     trimming */
	bool			m_update_sb;	/* sb needs update in mount */
	int64_t			m_low_space[XFS_LOWSP_MAX];
						/* low free space thresholds */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */

	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_cil_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_log_workqueue;
	struct workqueue_struct	*m_eofblocks_workqueue;
	struct workqueue_struct	*m_sync_workqueue;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;

	bool			m_always_cow;
	bool			m_fail_unmount;
#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;

/*
 * Flags for m_flags.
 */
#define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
						   must be synchronous except
						   for space allocations */
#define XFS_MOUNT_UNMOUNTING	(1ULL << 1)	/* filesystem is unmounting */
#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
#define XFS_MOUNT_DISCARD	(1ULL << 5)	/* discard unused blocks */
#define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
						   allocations */
#define XFS_MOUNT_ATTR2		(1ULL << 8)	/* allow use of attr2 format */
#define XFS_MOUNT_GRPID		(1ULL << 9)	/* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
#define XFS_MOUNT_DFLT_IOSIZE	(1ULL << 12)	/* set default i/o size */
#define XFS_MOUNT_SMALL_INUMS	(1ULL << 14)	/* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES	(1ULL << 15)	/* inode32 allocator active */
#define XFS_MOUNT_NOUUID	(1ULL << 16)	/* ignore uuid during mount */
#define XFS_MOUNT_IKEEP		(1ULL << 18)	/* keep empty inode clusters */
#define XFS_MOUNT_SWALLOC	(1ULL << 19)	/* turn on stripe width
						 * allocation */
#define XFS_MOUNT_RDONLY	(1ULL << 20)	/* read-only fs */
#define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
#define XFS_MOUNT_COMPAT_IOSIZE	(1ULL << 22)	/* don't report large preferred
						 * I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS	(1ULL << 24)	/* enable the filestreams
						   allocator */
#define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */

#define XFS_MOUNT_DAX		(1ULL << 62)	/* TEST ONLY! */


/*
 * Default minimum read and write sizes.
 */
#define XFS_READIO_LOG_LARGE	16
#define XFS_WRITEIO_LOG_LARGE	16

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

/*
 * Synchronous read and write sizes.  This should be
 * better for NFSv2 wsync filesystems.
 */
#define XFS_WSYNC_READIO_LOG	15	/* 32k */
#define XFS_WSYNC_WRITEIO_LOG	14	/* 16k */
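
/*
 * The *_LOG values above are log2 of a size in bytes.  Worked out here for
 * reference (editorial note, not taken from the sources):
 *
 *	XFS_READIO_LOG_LARGE  = 16  ->  1 << 16 = 64 KiB
 *	XFS_WRITEIO_LOG_LARGE = 16  ->  1 << 16 = 64 KiB
 *	XFS_MAX_IO_LOG        = 30  ->  1 << 30 =  1 GiB
 *	XFS_MIN_IO_LOG        = PAGE_SHIFT (12 with 4 KiB pages -> 4 KiB)
 *	XFS_WSYNC_READIO_LOG  = 15  ->  1 << 15 = 32 KiB
 *	XFS_WSYNC_WRITEIO_LOG = 14  ->  1 << 14 = 16 KiB
 */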

/*
 * Allow large block sizes to be reported to userspace programs if the
 * "largeio" mount option is used.
 *
 * If compatibility mode is specified, simply return the basic unit of caching
 * so that we don't get inefficient read/modify/write I/O from user apps.
 * Otherwise....
 *
 * If the underlying volume is a stripe, then return the stripe width in bytes
 * as the recommended I/O size.  If it is not a stripe and we've set a default
 * buffered I/O size, return that; otherwise return the compat default.
 */
static inline unsigned long
xfs_preferred_iosize(xfs_mount_t *mp)
{
	if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
		return PAGE_SIZE;
	return (mp->m_swidth ?
		(mp->m_swidth << mp->m_sb.sb_blocklog) :
		((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
			(1 << (int)max(mp->m_readio_log, mp->m_writeio_log)) :
			PAGE_SIZE));
}
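
/*
 * Worked examples for xfs_preferred_iosize(); the geometries are chosen here
 * purely for illustration.  This is the value XFS reports as the preferred
 * I/O size (e.g. st_blksize in stat()):
 *
 *	XFS_MOUNT_COMPAT_IOSIZE set:			PAGE_SIZE
 *	m_swidth = 16 blocks, sb_blocklog = 12:		16 << 12 = 64 KiB
 *	no stripe, XFS_MOUNT_DFLT_IOSIZE set,
 *	max(m_readio_log, m_writeio_log) = 16:		1 << 16 = 64 KiB
 *	no stripe, no default I/O size configured:	PAGE_SIZE
 */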

#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp)	\
				((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp)	((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */
#define SHUTDOWN_REMOTE_REQ	0x0010	/* shutdown came from remote cell */
#define SHUTDOWN_DEVICE_REQ	0x0020	/* failed all paths to the device */
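
/*
 * Illustrative call site (not lifted from the sources): a caller that sees a
 * fatal log write failure shuts the filesystem down with something like
 *
 *	xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 *
 * The macro above fills in the caller's __FILE__ and __LINE__ for the error
 * report, and once the shutdown takes effect XFS_FORCED_SHUTDOWN(mp)
 * evaluates true for subsequent operations.
 */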

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
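
/*
 * Worked example for the two converters above, with numbers chosen purely for
 * illustration: 4096-byte filesystem blocks span eight 512-byte basic blocks,
 * so XFS_BB_TO_FSBT() shifts the daddr right by 3; with sb_agblocks = 1048576:
 *
 *	d  = 25165824 basic blocks
 *	ld = 25165824 >> 3 = 3145728 filesystem blocks
 *	xfs_daddr_to_agno()  = 3145728 / 1048576 = 3
 *	xfs_daddr_to_agbno() = 3145728 % 1048576 = 0
 *
 * do_div() stores the quotient back into its first argument and returns the
 * remainder, which is why agno uses the updated variable while agbno uses the
 * return value.
 */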

/* per-AG block reservation data structures */
enum xfs_ag_resv_type {
	XFS_AG_RESV_NONE = 0,
	XFS_AG_RESV_AGFL,
	XFS_AG_RESV_METADATA,
	XFS_AG_RESV_RMAPBT,
};

struct xfs_ag_resv {
	/* number of blocks originally reserved here */
	xfs_extlen_t			ar_orig_reserved;
	/* number of blocks reserved here */
	xfs_extlen_t			ar_reserved;
	/* number of blocks originally asked for */
	xfs_extlen_t			ar_asked;
};

/*
 * Per-AG incore structure: copies of information in the AGF and AGI, kept to
 * improve the performance of allocation group selection.
 */
typedef struct xfs_perag {
	struct xfs_mount *pag_mount;	/* owner filesystem */
	xfs_agnumber_t	pag_agno;	/* AG this structure belongs to */
	atomic_t	pag_ref;	/* perag reference count */
	char		pagf_init;	/* this agf's entry is initialized */
	char		pagi_init;	/* this agi's entry is initialized */
	char		pagf_metadata;	/* the agf is preferred to be metadata */
	char		pagi_inodeok;	/* The agi is ok for inodes */
	uint8_t		pagf_levels[XFS_BTNUM_AGF];
					/* # of levels in bno & cnt btree */
	bool		pagf_agflreset; /* agfl requires reset before use */
	uint32_t	pagf_flcount;	/* count of blocks in freelist */
	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
	xfs_extlen_t	pagf_longest;	/* longest free space */
	uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
	xfs_agino_t	pagi_freecount;	/* number of free inodes */
	xfs_agino_t	pagi_count;	/* number of allocated inodes */

	/*
	 * Inode allocation search lookup optimisation.
	 * If the pagino matches, the search for new inodes
	 * doesn't need to search the near ones again straight away
	 */
	xfs_agino_t	pagl_pagino;
	xfs_agino_t	pagl_leftrec;
	xfs_agino_t	pagl_rightrec;

	/*
	 * Bitsets of per-ag metadata that have been checked and/or are sick.
	 * Callers should hold pag_state_lock before accessing this field.
	 */
	uint16_t	pag_checked;
	uint16_t	pag_sick;
	spinlock_t	pag_state_lock;

	spinlock_t	pagb_lock;	/* lock for pagb_tree */
	struct rb_root	pagb_tree;	/* ordered tree of busy extents */
	unsigned int	pagb_gen;	/* generation count for pagb_tree */
	wait_queue_head_t pagb_wait;	/* woken when pagb_gen changes */

	atomic_t	pagf_fstrms;	/* # of filestreams active in this AG */

	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
	int		pag_ici_reclaimable;	/* reclaimable inodes */
	struct mutex	pag_ici_reclaim_lock;	/* serialisation point */
	unsigned long	pag_ici_reclaim_cursor;	/* reclaim restart point */

	/* buffer cache index */
	spinlock_t	pag_buf_lock;	/* lock for pag_buf_hash */
	struct rhashtable pag_buf_hash;

	/* for rcu-safe freeing */
	struct rcu_head	rcu_head;
	int		pagb_count;	/* pagb slots in use */

	/* Blocks reserved for all kinds of metadata. */
	struct xfs_ag_resv	pag_meta_resv;
	/* Blocks reserved for the reverse mapping btree. */
	struct xfs_ag_resv	pag_rmapbt_resv;

	/* reference count */
	uint8_t			pagf_refcount_level;

	/*
	 * Unlinked inode information.  This incore information reflects
	 * data stored in the AGI, so callers must hold the AGI buffer lock
	 * or have some other means to control concurrency.
	 */
	struct rhashtable	pagi_unlinked_hash;
} xfs_perag_t;

static inline struct xfs_ag_resv *
xfs_perag_resv(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	switch (type) {
	case XFS_AG_RESV_METADATA:
		return &pag->pag_meta_resv;
	case XFS_AG_RESV_RMAPBT:
		return &pag->pag_rmapbt_resv;
	default:
		return NULL;
	}
}
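
/*
 * Illustrative use of xfs_perag_resv() (not from the sources): only the
 * metadata and rmapbt types have a backing pool, so callers must expect a
 * NULL return for XFS_AG_RESV_NONE and XFS_AG_RESV_AGFL.
 *
 *	static xfs_extlen_t
 *	example_ag_resv_held(struct xfs_perag *pag, enum xfs_ag_resv_type type)
 *	{
 *		struct xfs_ag_resv	*resv = xfs_perag_resv(pag, type);
 *
 *		return resv ? resv->ar_reserved : 0;
 *	}
 */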

int xfs_buf_hash_init(xfs_perag_t *pag);
void xfs_buf_hash_destroy(xfs_perag_t *pag);

extern void	xfs_uuid_table_free(void);
extern int	xfs_log_sbcount(xfs_mount_t *);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
				     xfs_agnumber_t *maxagi);
extern void	xfs_unmountfs(xfs_mount_t *);

extern int	xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
extern int	xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
				 bool reserved);
extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
			xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);

#endif	/* __XFS_MOUNT_H__ */