// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

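	/* No free slot was found in the scan above: grow the table by one. */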
	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}

STATIC void
__xfs_free_perag(
	struct rcu_head		*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		xfs_iunlink_destroy(pag);
		xfs_buf_hash_destroy(pag);
		mutex_destroy(&pag->pag_ici_reclaim_lock);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}
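
/*
 * Worked example for the check above: with 4k pages (PAGE_SHIFT == 12) and
 * 512 byte filesystem blocks (sb_blocklog == 9), each page maps eight
 * blocks, so nblocks is shifted right by 3 and the result must still fit
 * in an unsigned long page cache index.
 */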

int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = NULLAGNUMBER;
	xfs_perag_t	*pag;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind_new_pags;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		if (xfs_buf_hash_init(pag))
			goto out_free_pag;
		init_waitqueue_head(&pag->pagb_wait);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;

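		/*
		 * Preallocate radix tree nodes with GFP_NOFS so the insert
		 * below cannot fail on allocation while m_perag_lock is held.
		 */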
		if (radix_tree_preload(GFP_NOFS))
			goto out_hash_destroy;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_hash_destroy;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;
		error = xfs_iunlink_init(pag);
		if (error)
			goto out_hash_destroy;
		spin_lock_init(&pag->pag_state_lock);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_hash_destroy:
	xfs_buf_hash_destroy(pag);
out_free_pag:
	mutex_destroy(&pag->pag_ici_reclaim_lock);
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_iunlink_destroy(pag);
		mutex_destroy(&pag->pag_ici_reclaim_lock);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device. It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. blocksize(%d)",
				sbp->sb_blocksize);
			return -EINVAL;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. agsize(%d)",
					sbp->sb_agblocks);
				return -EINVAL;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				xfs_warn(mp,
	"alignment check failed: sunit(%d) less than bsize(%d)",
					mp->m_dalign, sbp->sb_blocksize);
				return -EINVAL;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_sb = true;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_sb = true;
			}
		} else {
			xfs_warn(mp,
	"cannot change alignment: superblock does not support data alignment");
			return -EINVAL;
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
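
/*
 * For example, a 64k RAID stripe unit arrives as sunit=128 in 512 byte
 * basic blocks; with a 4k filesystem block size the conversion above
 * yields an m_dalign of 16 filesystem blocks.
 */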

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}
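
/*
 * For instance, an I/O size log of 16 (64k) on a 4k block filesystem
 * (sb_blocklog == 12) would give m_readio_blocks = 1 << 4 = 16 blocks
 * per preferred read.
 */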

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}
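
/*
 * The table built above holds 1%, 2%, ... of the data block count, one
 * entry per XFS_LOWSP_* level, so the allocation paths only need a cheap
 * comparison at run time.
 */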

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller. This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(uint64_t, resblks, 8192);
	return resblks;
}
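
/*
 * Worked example: an 8GiB filesystem with 4k blocks has 2097152 data
 * blocks, so 5% is 104857 blocks, which the cap above then reduces to
 * 8192 reserved blocks.
 */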

/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, the first phase of recovery has already completed
	 * and we have consistent AG blocks on disk. We have not recovered
	 * EFIs yet, but they are recovered transactionally in the second
	 * recovery phase later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters. If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
	    !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
		return 0;

	return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *	  so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &(mp->m_sb);
	struct xfs_inode	*rip;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		resblks;
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for mismatched features2 values. Older kernels read & wrote
	 * into the wrong sb offset for sb_features2 on some platforms due to
	 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
	 * which made older superblock reading/writing routines swap it as a
	 * 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features. We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags. The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		    !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_sb = true;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_update_sb = true;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary.
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_setup_geometry(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_error_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	mp->m_fixedfsid[0] =
		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
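	/*
	 * That is, m_fixedfsid[0] packs UUID bytes 8-9 into its high 16 bits
	 * and bytes 4-5 into its low 16 bits, while m_fixedfsid[1] carries
	 * UUID bytes 0-3.
	 */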

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservation values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_free_perag;
	}

	/*
	 * Log's mount-time initialization. The first part of recovery can
	 * place some items on the AIL, to be handled when recovery is
	 * finished or cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/* Make sure the summary counts are ok. */
	error = xfs_check_summary_counts(mp);
	if (error)
		goto out_log_dealloc;

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp,
			"Failed to read root inode 0x%llx, error %d",
			sbp->sb_rootino, -error);
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = -EFSCORRUPTED;
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode. Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}

	/*
	 * Finish recovering the file system. This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts. This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
							XFS_MOUNT_RDONLY) {
		xfs_quiesce_attr(mp);
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto out_quota;
		}

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;
	}

	return 0;

 out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
	xfs_qm_unmount_quotas(mp);
 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	xfs_irele(rip);
	/* Clean out dquots that might be in memory after quotacheck. */
	xfs_qm_unmount(mp);
	/*
	 * Cancel all delayed reclaim work and reclaim the inodes directly.
	 * We have to do this /after/ rtunmount and qm_unmount because those
	 * two will have scheduled delayed reclaim for the rt/quota inodes.
	 *
	 * This is slightly different from the unmountfs call sequence
	 * because we could be tearing down a partially set up mount. In
	 * particular, if log_mount_finish fails we bail out without calling
	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
	 * quota inodes.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);
	xfs_health_unmount(mp);
 out_log_dealloc:
	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
	xfs_log_mount_cancel(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_free_dir:
	xfs_da_unmount(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out_remove_errortag:
	xfs_errortag_del(mp);
 out_remove_error_sysfs:
	xfs_error_sysfs_del(mp);
 out_del_stats:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);
 out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	uint64_t		resblks;
	int			error;

	xfs_stop_block_reaping(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	xfs_irele(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed but still has its buffer pinned in memory
	 * because the transaction is still sitting in an iclog. The stale
	 * inodes on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. The inode
	 * flush takes the flush lock unconditionally and with nothing to
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Wait for all busy extents to be freed, including completion of
	 * any discard operation.
	 */
	xfs_extent_busy_wait_all(mp);
	flush_workqueue(xfs_discard_wq);

	/*
	 * We now need to tell the world we are unmounting. This will allow
	 * us to detect that the filesystem is going away and we should error
	 * out anything that we have been retrying in the background. This will
	 * prevent never-ending retries in AIL pushing from hanging the unmount.
	 */
	mp->m_flags |= XFS_MOUNT_UNMOUNTING;

	/*
	 * Flush all pending changes from the AIL.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * And reclaim all inodes. At this point there should be no dirty
	 * inodes and none should be pinned or locked, but use synchronous
	 * reclaim just to be sure. We can stop background inode reclaim
	 * here as well if it is still running.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);
	xfs_health_unmount(mp);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter...
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_da_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp);
#endif
	xfs_free_perag(mp);

	xfs_errortag_del(mp);
	xfs_error_sysfs_del(mp);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}

/*
 * Determine whether modifications can proceed. The caller specifies the minimum
 * freeze level for which modifications should not be allowed. This allows
 * certain operations to proceed while the freeze sequence is in progress, if
 * necessary.
 */
bool
xfs_fs_writable(
	struct xfs_mount	*mp,
	int			level)
{
	ASSERT(level > SB_UNFROZEN);
	if ((mp->m_super->s_writers.frozen >= level) ||
	    XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
		return false;

	return true;
}
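
/*
 * For example, xfs_log_sbcount() below passes SB_FREEZE_COMPLETE so the
 * final counter sync is still allowed in mid freeze, while callers such as
 * xfs_mount_reset_sbqflags() pass SB_FREEZE_WRITE and back off earlier.
 */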

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so we use the
 * transaction allocator that does not block when the transaction subsystem is
 * in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
	/* Allow this to proceed during the freeze sequence... */
	if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
		return 0;

	/*
	 * We don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	return xfs_sync_sb(mp, true);
}
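
/*
 * Illustrative sketch only, not part of the build: xfs_unmountfs() above
 * uses this to push the lazy counters into the on-disk superblock, and a
 * freeze/quiesce path would do the same:
 *
 *	error = xfs_log_sbcount(mp);
 *	if (error)
 *		xfs_warn(mp, "Unable to update superblock counters.");
 */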

/*
 * Deltas for the inode count are +/-64, hence we use a large batch size
 * of 128 so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128
int
xfs_mod_icount(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
		ASSERT(0);
		percpu_counter_add(&mp->m_icount, -delta);
		return -EINVAL;
	}
	return 0;
}
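
/*
 * Illustrative sketch only, not part of the build: inode counts move in
 * whole chunks, so callers pass +/-64 deltas, e.g.:
 *
 *	error = xfs_mod_icount(mp, XFS_INODES_PER_CHUNK);	(chunk allocated)
 *	error = xfs_mod_icount(mp, -XFS_INODES_PER_CHUNK);	(chunk freed)
 *
 * With XFS_ICOUNT_BATCH at 128, a single +/-64 delta fits within the
 * per-cpu batch, so the global counter lock is not taken on every update.
 */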

int
xfs_mod_ifree(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add(&mp->m_ifree, delta);
	if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
		ASSERT(0);
		percpu_counter_add(&mp->m_ifree, -delta);
		return -EINVAL;
	}
	return 0;
}

/*
 * Deltas for the block count can vary from 1 to very large, but lock
 * contention only occurs on frequent small block count updates such as in
 * the delayed allocation path for buffered writes (page at a time updates).
 * Hence we set a large batch count (1024) to minimise global counter
 * updates except when we get near to ENOSPC and we have to be very
 * accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
int
xfs_mod_fdblocks(
	struct xfs_mount	*mp,
	int64_t			delta,
	bool			rsvd)
{
	int64_t			lcounter;
	long long		res_used;
	s32			batch;

	if (delta > 0) {
		/*
		 * If the reserve pool is depleted, put blocks back into it
		 * first. Most of the time the pool is full.
		 */
		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
			percpu_counter_add(&mp->m_fdblocks, delta);
			return 0;
		}

		spin_lock(&mp->m_sb_lock);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (res_used > delta) {
			mp->m_resblks_avail += delta;
		} else {
			delta -= res_used;
			mp->m_resblks_avail = mp->m_resblks;
			percpu_counter_add(&mp->m_fdblocks, delta);
		}
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}

	/*
	 * Taking blocks away, so we need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are really close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
		return 0;
	}

	/*
	 * Lock up the sb for dipping into reserves before releasing the space
	 * that took us to ENOSPC.
	 */
	spin_lock(&mp->m_sb_lock);
	percpu_counter_add(&mp->m_fdblocks, -delta);
	if (!rsvd)
		goto fdblocks_enospc;

	lcounter = (long long)mp->m_resblks_avail + delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}
	printk_once(KERN_WARNING
		"Filesystem \"%s\": reserve blocks depleted! "
		"Consider increasing reserve pool size.",
		mp->m_fsname);
fdblocks_enospc:
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}
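
/*
 * Illustrative sketch only, not part of the build: an allocation path
 * takes blocks with a negative delta and may dip into the reserve pool
 * when the reserved flag is set, while the matching release path returns
 * them with a positive delta:
 *
 *	error = xfs_mod_fdblocks(mp, -(int64_t)nblks, rsvd);
 *	if (error == -ENOSPC)
 *		goto out_undo;		(truly out of space)
 *	...
 *	xfs_mod_fdblocks(mp, (int64_t)nblks, false);
 *
 * Here "nblks" and "out_undo" are hypothetical caller names.
 */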

int
xfs_mod_frextents(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	int64_t			lcounter;
	int			ret = 0;

	spin_lock(&mp->m_sb_lock);
	lcounter = mp->m_sb.sb_frextents + delta;
	if (lcounter < 0)
		ret = -ENOSPC;
	else
		mp->m_sb.sb_frextents = lcounter;
	spin_unlock(&mp->m_sb_lock);
	return ret;
}
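
/*
 * Note that, unlike the inode and block counters above, the free realtime
 * extent count is not a per-cpu counter: realtime allocations are
 * presumably infrequent enough that a plain update of the in-core
 * superblock value under m_sb_lock is sufficient.
 */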

/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_buf_relse().
 */
struct xfs_buf *
xfs_getsb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	xfs_buf_hold(bp);
	ASSERT(bp->b_flags & XBF_DONE);
	return bp;
}
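
/*
 * Illustrative sketch only, not part of the build: a typical caller takes
 * the locked, referenced superblock buffer and drops both the lock and
 * the reference with xfs_buf_relse() when done:
 *
 *	struct xfs_buf	*bp = xfs_getsb(mp);
 *
 *	... read or log superblock fields through bp ...
 *	xfs_buf_relse(bp);
 */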

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return -EROFS;
	}
	return 0;
}
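
/*
 * Illustrative sketch only, not part of the build: callers gate
 * write-requiring operations on this check, passing a human-readable
 * operation name for the log message:
 *
 *	error = xfs_dev_is_read_only(mp, "growfs");
 *	if (error)
 *		return error;
 */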

/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
	struct xfs_mount	*mp)
{
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return;

	xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}

/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching). Each change to delayed allocation
 * reservations can easily exceed the default percpu counter batching, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH	(4096)
void
xfs_mod_delalloc(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
			XFS_DELALLOC_BATCH);
}
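
/*
 * Illustrative sketch only, not part of the build: the buffered write
 * path adds each new delayed allocation reservation here and the
 * writeback/cancel paths take it back out:
 *
 *	xfs_mod_delalloc(mp, alen + indlen);			(reserve)
 *	xfs_mod_delalloc(mp, -(int64_t)del->br_blockcount);	(unreserve)
 *
 * "alen", "indlen" and "del" stand in for the caller's extent length,
 * indirect reservation and deleted-extent variables.
 */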