// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"


static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

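/* Free the table used to track the UUIDs of mounted XFS filesystems. */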
void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}


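/* RCU callback that frees a per-AG structure once its grace period expires. */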
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		xfs_iunlink_destroy(pag);
		xfs_buf_hash_destroy(pag);
		mutex_destroy(&pag->pag_ici_reclaim_lock);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}

int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = NULLAGNUMBER;
	xfs_perag_t	*pag;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind_new_pags;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		if (xfs_buf_hash_init(pag))
			goto out_free_pag;
		init_waitqueue_head(&pag->pagb_wait);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_hash_destroy;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_hash_destroy;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;
		error = xfs_iunlink_init(pag);
		if (error)
			goto out_hash_destroy;
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_hash_destroy:
	xfs_buf_hash_destroy(pag);
out_free_pag:
	mutex_destroy(&pag->pag_ici_reclaim_lock);
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_iunlink_destroy(pag);
		mutex_destroy(&pag->pag_ici_reclaim_lock);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. blocksize(%d)",
				sbp->sb_blocksize);
			return -EINVAL;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				xfs_warn(mp,
			"alignment check failed: sunit/swidth vs. agsize(%d)",
					 sbp->sb_agblocks);
				return -EINVAL;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				xfs_warn(mp,
		"alignment check failed: sunit(%d) less than bsize(%d)",
					 mp->m_dalign, sbp->sb_blocksize);
				return -EINVAL;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_sb = true;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_sb = true;
			}
		} else {
			xfs_warn(mp,
	"cannot change alignment: superblock does not support data alignment");
			return -EINVAL;
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}


/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(uint64_t, resblks, 8192);
	return resblks;
}

/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, then the first phase of recovery has completed and
	 * we have consistent AG blocks on disk. We have not recovered EFIs
	 * yet, but they are recovered transactionally in the second recovery
	 * phase later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters.  If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
	    !(mp->m_flags & XFS_MOUNT_BAD_SUMMARY))
		return 0;

	return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &(mp->m_sb);
	struct xfs_inode	*rip;
	uint64_t		resblks;
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for mismatched features2 values. Older kernels read & wrote
	 * into the wrong sb offset for sb_features2 on some platforms due to
	 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
	 * which made older superblock reading/writing routines swap it as a
	 * 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features.  We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags. The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_sb = true;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_update_sb = true;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_error_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 *
	 * For v5 filesystems, scale the cluster size with the inode size to
	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
	 * has set the inode alignment value appropriately for larger cluster
	 * sizes.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		int	new_size = mp->m_inode_cluster_size;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			mp->m_inode_cluster_size = new_size;
	}
	mp->m_blocks_per_cluster = xfs_icluster_size_fsb(mp);
	mp->m_inodes_per_cluster = XFS_FSB_TO_INO(mp, mp->m_blocks_per_cluster);
	mp->m_cluster_align = xfs_ialloc_cluster_alignment(mp);
	mp->m_cluster_align_inodes = XFS_FSB_TO_INO(mp, mp->m_cluster_align);

	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	mp->m_fixedfsid[0] =
		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_free_perag;
	}

	/*
	 * Log's mount-time initialization. The first part of recovery can place
	 * some items on the AIL, to be handled when recovery is finished or
	 * cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/* Make sure the summary counts are ok. */
	error = xfs_check_summary_counts(mp);
	if (error)
		goto out_log_dealloc;

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp,
			"Failed to read root inode 0x%llx, error %d",
			sbp->sb_rootino, -error);
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = -EFSCORRUPTED;
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts. This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
							XFS_MOUNT_RDONLY) {
		xfs_quiesce_attr(mp);
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto out_quota;
		}

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;
	}

	return 0;

 out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
	xfs_qm_unmount_quotas(mp);
 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	xfs_irele(rip);
	/* Clean out dquots that might be in memory after quotacheck. */
	xfs_qm_unmount(mp);
	/*
	 * Cancel all delayed reclaim work and reclaim the inodes directly.
	 * We have to do this /after/ rtunmount and qm_unmount because those
	 * two will have scheduled delayed reclaim for the rt/quota inodes.
	 *
	 * This is slightly different from the unmountfs call sequence
	 * because we could be tearing down a partially set up mount.  In
	 * particular, if log_mount_finish fails we bail out without calling
	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
	 * quota inodes.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);
 out_log_dealloc:
	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
	xfs_log_mount_cancel(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_free_dir:
	xfs_da_unmount(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out_remove_errortag:
	xfs_errortag_del(mp);
 out_remove_error_sysfs:
	xfs_error_sysfs_del(mp);
 out_del_stats:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);
 out:
	return error;
}

1096/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097 * This flushes out the inodes,dquots and the superblock, unmounts the
1098 * log and makes sure that incore structures are freed.
1099 */
Christoph Hellwig41b5c2e2008-08-13 16:49:57 +10001100void
1101xfs_unmountfs(
1102 struct xfs_mount *mp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103{
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001104 uint64_t resblks;
Christoph Hellwig41b5c2e2008-08-13 16:49:57 +10001105 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106
Darrick J. Wongd6b636e2018-05-09 10:03:56 -07001107 xfs_icache_disable_reclaim(mp);
Darrick J. Wong84d69612016-10-03 09:11:44 -07001108 xfs_fs_unreserve_ag_blocks(mp);
Christoph Hellwig7d095252009-06-08 15:33:32 +02001109 xfs_qm_unmount_quotas(mp);
Christoph Hellwigb93b6e42009-02-04 09:33:58 +01001110 xfs_rtunmount_inodes(mp);
Darrick J. Wong44a87362018-07-25 12:52:32 -07001111 xfs_irele(mp->m_rootip);
Christoph Hellwig77508ec2008-08-13 16:49:04 +10001112
David Chinner641c56f2007-06-18 16:50:17 +10001113 /*
1114 * We can potentially deadlock here if we have an inode cluster
Malcolm Parsons9da096f2009-03-29 09:55:42 +02001115 * that has been freed has its buffer still pinned in memory because
David Chinner641c56f2007-06-18 16:50:17 +10001116 * the transaction is still sitting in a iclog. The stale inodes
1117 * on that buffer will have their flush locks held until the
1118 * transaction hits the disk and the callbacks run. the inode
1119 * flush takes the flush lock unconditionally and with nothing to
1120 * push out the iclog we will never get that unlocked. hence we
1121 * need to force the log first.
1122 */
Christoph Hellwiga14a3482010-01-19 09:56:46 +00001123 xfs_log_force(mp, XFS_LOG_SYNC);
Dave Chinnerc8543632010-02-06 12:39:36 +11001124
1125 /*
Christoph Hellwigebf55872017-02-07 14:06:57 -08001126 * Wait for all busy extents to be freed, including completion of
1127 * any discard operation.
1128 */
1129 xfs_extent_busy_wait_all(mp);
Christoph Hellwig4560e782017-02-07 14:07:58 -08001130 flush_workqueue(xfs_discard_wq);
Christoph Hellwigebf55872017-02-07 14:06:57 -08001131
1132 /*
Carlos Maiolinoe6b3bb72016-05-18 11:11:27 +10001133 * We now need to tell the world we are unmounting. This will allow
1134 * us to detect that the filesystem is going away and we should error
1135 * out anything that we have been retrying in the background. This will
1136 * prevent neverending retries in AIL pushing from hanging the unmount.
1137 */
1138 mp->m_flags |= XFS_MOUNT_UNMOUNTING;
1139
1140 /*
Christoph Hellwig211e4d42012-04-23 15:58:34 +10001141 * Flush all pending changes from the AIL.
Dave Chinnerc8543632010-02-06 12:39:36 +11001142 */
Christoph Hellwig211e4d42012-04-23 15:58:34 +10001143 xfs_ail_push_all_sync(mp->m_ail);
1144
1145 /*
1146 * And reclaim all inodes. At this point there should be no dirty
Dave Chinner7e185302012-10-08 21:56:00 +11001147 * inodes and none should be pinned or locked, but use synchronous
1148 * reclaim just to be sure. We can stop background inode reclaim
1149 * here as well if it is still running.
Christoph Hellwig211e4d42012-04-23 15:58:34 +10001150 */
Dave Chinner7e185302012-10-08 21:56:00 +11001151 cancel_delayed_work_sync(&mp->m_reclaim_work);
Dave Chinnerc8543632010-02-06 12:39:36 +11001152 xfs_reclaim_inodes(mp, SYNC_WAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
Christoph Hellwig7d095252009-06-08 15:33:32 +02001154 xfs_qm_unmount(mp);
Lachlan McIlroya357a122008-10-30 16:53:25 +11001155
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 /*
David Chinner84e1e992007-06-18 16:50:27 +10001157 * Unreserve any blocks we have so that when we unmount we don't account
1158 * the reserved free space as used. This is really only necessary for
1159 * lazy superblock counting because it trusts the incore superblock
Malcolm Parsons9da096f2009-03-29 09:55:42 +02001160 * counters to be absolutely correct on clean unmount.
David Chinner84e1e992007-06-18 16:50:27 +10001161 *
1162 * We don't bother correcting this elsewhere for lazy superblock
1163 * counting because on mount of an unclean filesystem we reconstruct the
1164 * correct counter value and this is irrelevant.
1165 *
1166 * For non-lazy counter filesystems, this doesn't matter at all because
1167 * we only every apply deltas to the superblock and hence the incore
1168 * value does not matter....
1169 */
1170 resblks = 0;
David Chinner714082b2008-04-10 12:20:03 +10001171 error = xfs_reserve_blocks(mp, &resblks, NULL);
1172 if (error)
Dave Chinner0b932cc2011-03-07 10:08:35 +11001173 xfs_warn(mp, "Unable to free reserved block pool. "
David Chinner714082b2008-04-10 12:20:03 +10001174 "Freespace may not be correct on next mount.");
1175
Chandra Seetharamanadab0f62011-06-29 22:10:14 +00001176 error = xfs_log_sbcount(mp);
David Chinnere5720ee2008-04-10 12:21:18 +10001177 if (error)
Dave Chinner0b932cc2011-03-07 10:08:35 +11001178 xfs_warn(mp, "Unable to update superblock counters. "
David Chinnere5720ee2008-04-10 12:21:18 +10001179 "Freespace may not be correct on next mount.");
Christoph Hellwig87c7bec2011-09-14 14:08:26 +00001180
Christoph Hellwig21b699c2009-03-16 08:19:29 +01001182 xfs_log_unmount(mp);
Dave Chinner0650b552014-06-06 15:01:58 +10001183 xfs_da_unmount(mp);
Christoph Hellwig27174202009-03-30 10:21:31 +02001184 xfs_uuid_unmount(mp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185
Christoph Hellwig1550d0b2008-08-13 16:17:37 +10001186#if defined(DEBUG)
Darrick J. Wong31965ef2017-06-20 17:54:46 -07001187 xfs_errortag_clearall(mp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188#endif
Christoph Hellwigff4f0382008-08-13 16:50:47 +10001189 xfs_free_perag(mp);
Brian Fostera31b1d32014-07-15 08:07:01 +10001190
Darrick J. Wong31965ef2017-06-20 17:54:46 -07001191 xfs_errortag_del(mp);
Carlos Maiolino192852b2016-05-18 10:58:51 +10001192 xfs_error_sysfs_del(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001193 xfs_sysfs_del(&mp->m_stats.xs_kobj);
Brian Fostera31b1d32014-07-15 08:07:01 +10001194 xfs_sysfs_del(&mp->m_kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195}
1196
Brian Foster91ee5752014-11-28 14:02:59 +11001197/*
1198 * Determine whether modifications can proceed. The caller specifies the minimum
1199 * freeze level for which modifications should not be allowed. This allows
1200 * certain operations to proceed while the freeze sequence is in progress, if
1201 * necessary.
1202 */
1203bool
1204xfs_fs_writable(
1205 struct xfs_mount *mp,
1206 int level)
David Chinner92821e22007-05-24 15:26:31 +10001207{
Brian Foster91ee5752014-11-28 14:02:59 +11001208 ASSERT(level > SB_UNFROZEN);
1209 if ((mp->m_super->s_writers.frozen >= level) ||
1210 XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
1211 return false;
1212
1213 return true;
David Chinner92821e22007-05-24 15:26:31 +10001214}
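
/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller checks writability before starting a modification.  The helper
 * name xfs_example_try_modify() is hypothetical; SB_FREEZE_WRITE is the
 * usual minimum level for data modifications.
 */
static inline int
xfs_example_try_modify(
	struct xfs_mount	*mp)
{
	/* Bail out if a freeze has reached the write level or we are shut down. */
	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return -EROFS;

	/* ... safe to allocate a transaction and make the change ... */
	return 0;
}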
1215
1216/*
Alex Elderb2ce3972011-07-11 09:51:44 -05001217 * xfs_log_sbcount
1218 *
Chandra Seetharamanadab0f62011-06-29 22:10:14 +00001219 * Sync the superblock counters to disk.
Alex Elderb2ce3972011-07-11 09:51:44 -05001220 *
Brian Foster91ee5752014-11-28 14:02:59 +11001221 * Note this code can be called during the process of freezing, so we use the
1222 * transaction allocator that does not block when the transaction subsystem is
1223 * in its frozen state.
David Chinner92821e22007-05-24 15:26:31 +10001224 */
1225int
Chandra Seetharamanadab0f62011-06-29 22:10:14 +00001226xfs_log_sbcount(xfs_mount_t *mp)
David Chinner92821e22007-05-24 15:26:31 +10001227{
Brian Foster91ee5752014-11-28 14:02:59 +11001228 /* allow this to proceed during the freeze sequence... */
1229 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
David Chinner92821e22007-05-24 15:26:31 +10001230 return 0;
1231
David Chinner92821e22007-05-24 15:26:31 +10001232 /*
1233 * we don't need to do this if we are updating the superblock
1234 * counters on every modification.
1235 */
1236 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1237 return 0;
1238
Dave Chinner61e63ec2015-01-22 09:10:31 +11001239 return xfs_sync_sb(mp, true);
David Chinner92821e22007-05-24 15:26:31 +10001240}
1241
Dave Chinner8c1903d2015-05-29 07:39:34 +10001242/*
1243 * Deltas for the inode count are +/-64, hence we use a large batch size
1244 * of 128 so we don't need to take the counter lock on every update.
1245 */
1246#define XFS_ICOUNT_BATCH 128
Dave Chinner501ab322015-02-23 21:19:28 +11001247int
1248xfs_mod_icount(
1249 struct xfs_mount *mp,
1250 int64_t delta)
1251{
Nikolay Borisov104b4e52017-06-20 21:01:20 +03001252 percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
Dave Chinner8c1903d2015-05-29 07:39:34 +10001253 if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
Dave Chinner501ab322015-02-23 21:19:28 +11001254 ASSERT(0);
1255 percpu_counter_add(&mp->m_icount, -delta);
1256 return -EINVAL;
1257 }
1258 return 0;
1259}
1260
Dave Chinnere88b64e2015-02-23 21:19:53 +11001261int
1262xfs_mod_ifree(
1263 struct xfs_mount *mp,
1264 int64_t delta)
1265{
1266 percpu_counter_add(&mp->m_ifree, delta);
1267 if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
1268 ASSERT(0);
1269 percpu_counter_add(&mp->m_ifree, -delta);
1270 return -EINVAL;
1271 }
1272 return 0;
1273}
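
/*
 * Illustrative sketch only: how a caller might account a freshly
 * allocated chunk of 64 inodes against the per-cpu counters, undoing
 * the icount update if the ifree update fails.  The helper name is
 * hypothetical; the real accounting lives in the inode allocation code.
 */
static inline int
xfs_example_account_inode_chunk(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_mod_icount(mp, 64);		/* 64 new inodes exist... */
	if (error)
		return error;

	error = xfs_mod_ifree(mp, 64);		/* ...and all of them are free */
	if (error)
		xfs_mod_icount(mp, -64);	/* roll back on failure */
	return error;
}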
Dave Chinner0d485ad2015-02-23 21:22:03 +11001274
Dave Chinner8c1903d2015-05-29 07:39:34 +10001275/*
1276 * Deltas for the block count can vary from 1 to very large, but lock contention
1277 * only occurs on frequent small block count updates such as in the delayed
 1278 * allocation path for buffered writes (a page at a time). Hence we set
1279 * a large batch count (1024) to minimise global counter updates except when
1280 * we get near to ENOSPC and we have to be very accurate with our updates.
1281 */
1282#define XFS_FDBLOCKS_BATCH 1024
Dave Chinner0d485ad2015-02-23 21:22:03 +11001283int
1284xfs_mod_fdblocks(
1285 struct xfs_mount *mp,
1286 int64_t delta,
1287 bool rsvd)
1288{
1289 int64_t lcounter;
1290 long long res_used;
1291 s32 batch;
1292
1293 if (delta > 0) {
1294 /*
1295 * If the reserve pool is depleted, put blocks back into it
1296 * first. Most of the time the pool is full.
1297 */
1298 if (likely(mp->m_resblks == mp->m_resblks_avail)) {
1299 percpu_counter_add(&mp->m_fdblocks, delta);
1300 return 0;
1301 }
1302
1303 spin_lock(&mp->m_sb_lock);
1304 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1305
1306 if (res_used > delta) {
1307 mp->m_resblks_avail += delta;
1308 } else {
1309 delta -= res_used;
1310 mp->m_resblks_avail = mp->m_resblks;
1311 percpu_counter_add(&mp->m_fdblocks, delta);
1312 }
1313 spin_unlock(&mp->m_sb_lock);
1314 return 0;
1315 }
1316
1317 /*
 1318 * When taking blocks away, we need to be more accurate the closer
 1319 * we are to zero.
1320 *
Dave Chinner0d485ad2015-02-23 21:22:03 +11001321 * If the counter has a value of less than 2 * max batch size,
 1322 * then make everything serialise as we are really close to
1323 * ENOSPC.
1324 */
Dave Chinner8c1903d2015-05-29 07:39:34 +10001325 if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
1326 XFS_FDBLOCKS_BATCH) < 0)
Dave Chinner0d485ad2015-02-23 21:22:03 +11001327 batch = 1;
1328 else
Dave Chinner8c1903d2015-05-29 07:39:34 +10001329 batch = XFS_FDBLOCKS_BATCH;
Dave Chinner0d485ad2015-02-23 21:22:03 +11001330
Nikolay Borisov104b4e52017-06-20 21:01:20 +03001331 percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
Darrick J. Wong52548852016-08-03 11:38:24 +10001332 if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
Dave Chinner8c1903d2015-05-29 07:39:34 +10001333 XFS_FDBLOCKS_BATCH) >= 0) {
Dave Chinner0d485ad2015-02-23 21:22:03 +11001334 /* we had space! */
1335 return 0;
1336 }
1337
1338 /*
1339 * lock up the sb for dipping into reserves before releasing the space
1340 * that took us to ENOSPC.
1341 */
1342 spin_lock(&mp->m_sb_lock);
1343 percpu_counter_add(&mp->m_fdblocks, -delta);
1344 if (!rsvd)
1345 goto fdblocks_enospc;
1346
1347 lcounter = (long long)mp->m_resblks_avail + delta;
1348 if (lcounter >= 0) {
1349 mp->m_resblks_avail = lcounter;
1350 spin_unlock(&mp->m_sb_lock);
1351 return 0;
1352 }
1353 printk_once(KERN_WARNING
1354 "Filesystem \"%s\": reserve blocks depleted! "
1355 "Consider increasing reserve pool size.",
1356 mp->m_fsname);
1357fdblocks_enospc:
1358 spin_unlock(&mp->m_sb_lock);
1359 return -ENOSPC;
1360}
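
/*
 * Illustrative sketch only: pairing a negative fdblocks delta (take
 * blocks, optionally dipping into the reserve pool when rsvd is true)
 * with a positive delta to give the blocks back if the caller-supplied
 * operation fails.  The helper name and xfs_example_op_t are hypothetical.
 */
typedef int (*xfs_example_op_t)(struct xfs_mount *mp);

static inline int
xfs_example_with_blocks(
	struct xfs_mount	*mp,
	int64_t			nblocks,
	bool			rsvd,
	xfs_example_op_t	op)
{
	int			error;

	error = xfs_mod_fdblocks(mp, -nblocks, rsvd);
	if (error)
		return error;			/* -ENOSPC, even via the pool */

	error = op(mp);
	if (error)
		xfs_mod_fdblocks(mp, nblocks, false);	/* undo on failure */
	return error;
}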
1361
Dave Chinnerbab98bb2015-02-23 21:22:54 +11001362int
1363xfs_mod_frextents(
1364 struct xfs_mount *mp,
1365 int64_t delta)
1366{
1367 int64_t lcounter;
1368 int ret = 0;
1369
1370 spin_lock(&mp->m_sb_lock);
1371 lcounter = mp->m_sb.sb_frextents + delta;
1372 if (lcounter < 0)
1373 ret = -ENOSPC;
1374 else
1375 mp->m_sb.sb_frextents = lcounter;
1376 spin_unlock(&mp->m_sb_lock);
1377 return ret;
1378}
1379
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 * xfs_getsb() is called to obtain the buffer for the superblock.
1382 * The buffer is returned locked and read in from disk.
 1383 * The buffer should be released with a call to xfs_buf_relse().
1384 *
 1385 * If the flags parameter is XBF_TRYLOCK, then we'll only return
1386 * the superblock buffer if it can be locked without sleeping.
1387 * If it can't then we'll return NULL.
1388 */
Christoph Hellwig0c842ad2011-07-08 14:36:19 +02001389struct xfs_buf *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390xfs_getsb(
Christoph Hellwig0c842ad2011-07-08 14:36:19 +02001391 struct xfs_mount *mp,
1392 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393{
Christoph Hellwig0c842ad2011-07-08 14:36:19 +02001394 struct xfs_buf *bp = mp->m_sb_bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Christoph Hellwig0c842ad2011-07-08 14:36:19 +02001396 if (!xfs_buf_trylock(bp)) {
1397 if (flags & XBF_TRYLOCK)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 return NULL;
Christoph Hellwig0c842ad2011-07-08 14:36:19 +02001399 xfs_buf_lock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 }
Christoph Hellwig0c842ad2011-07-08 14:36:19 +02001401
Chandra Seetharaman72790aa2011-07-22 23:40:04 +00001402 xfs_buf_hold(bp);
Dave Chinnerb0388bf2016-02-10 15:01:11 +11001403 ASSERT(bp->b_flags & XBF_DONE);
Jesper Juhl014c2542006-01-15 02:37:08 +01001404 return bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405}
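
/*
 * Illustrative sketch only: grabbing the superblock buffer without
 * sleeping and releasing it again.  The helper name is hypothetical.
 */
static inline bool
xfs_example_peek_sb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	bp = xfs_getsb(mp, XBF_TRYLOCK);	/* NULL if the lock is contended */
	if (!bp)
		return false;

	/* ... inspect the superblock buffer contents here ... */

	xfs_buf_relse(bp);			/* unlock and drop our reference */
	return true;
}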
1406
1407/*
1408 * Used to free the superblock along various error paths.
1409 */
1410void
1411xfs_freesb(
Dave Chinner26af6552010-09-22 10:47:20 +10001412 struct xfs_mount *mp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413{
Dave Chinner26af6552010-09-22 10:47:20 +10001414 struct xfs_buf *bp = mp->m_sb_bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
Dave Chinner26af6552010-09-22 10:47:20 +10001416 xfs_buf_lock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 mp->m_sb_bp = NULL;
Dave Chinner26af6552010-09-22 10:47:20 +10001418 xfs_buf_relse(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419}
1420
1421/*
Christoph Hellwigdda35b82010-02-15 09:44:46 +00001422 * If the underlying (data/log/rt) device is read-only, there are some
1423 * operations that cannot proceed.
1424 */
1425int
1426xfs_dev_is_read_only(
1427 struct xfs_mount *mp,
1428 char *message)
1429{
1430 if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1431 xfs_readonly_buftarg(mp->m_logdev_targp) ||
1432 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
Dave Chinner0b932cc2011-03-07 10:08:35 +11001433 xfs_notice(mp, "%s required on read-only device.", message);
1434 xfs_notice(mp, "write access unavailable, cannot proceed.");
Dave Chinner24513372014-06-25 14:58:08 +10001435 return -EROFS;
Christoph Hellwigdda35b82010-02-15 09:44:46 +00001436 }
1437 return 0;
1438}
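
/*
 * Illustrative sketch only: callers that are about to write (for example
 * grow operations) bail out early on read-only devices.  The helper name
 * and the message string are hypothetical.
 */
static inline int
xfs_example_start_grow(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_dev_is_read_only(mp, "grow");	/* logs a notice on failure */
	if (error)
		return error;				/* -EROFS */

	/* ... safe to proceed with the on-disk changes ... */
	return 0;
}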
Darrick J. Wongf467cad2018-07-20 09:28:40 -07001439
1440/* Force the summary counters to be recalculated at next mount. */
1441void
1442xfs_force_summary_recalc(
1443 struct xfs_mount *mp)
1444{
1445 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1446 return;
1447
1448 spin_lock(&mp->m_sb_lock);
1449 mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
1450 spin_unlock(&mp->m_sb_lock);
1451}
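
/*
 * Illustrative sketch only: a caller that detects the incore summary
 * counters may be wrong flags the filesystem so they are rebuilt on the
 * next mount.  The helper name and the error value checked here are
 * hypothetical.
 */
static inline void
xfs_example_note_bad_counters(
	struct xfs_mount	*mp,
	int			error)
{
	if (error == -EFSCORRUPTED)
		xfs_force_summary_recalc(mp);	/* sets XFS_MOUNT_BAD_SUMMARY */
}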