// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;           /* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;    /* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
        Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
        Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
        Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
        Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
        Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
        Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
        Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
        Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
        Opt_discard, Opt_nodiscard, Opt_dax,
};

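/*
 * For example, a mount invocation such as
 *
 *      mount -t xfs -o logbufs=8,logbsize=256k,noalign /dev/sdb1 /mnt
 *
 * is split into individual parameters by the VFS; each one is matched
 * against the xfs_fs_parameters[] table below by fs_parse() and then
 * applied to the xfs_mount in xfs_fc_parse_param().
 */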
static const struct fs_parameter_spec xfs_fs_parameters[] = {
        fsparam_u32("logbufs",          Opt_logbufs),
        fsparam_string("logbsize",      Opt_logbsize),
        fsparam_string("logdev",        Opt_logdev),
        fsparam_string("rtdev",         Opt_rtdev),
        fsparam_flag("wsync",           Opt_wsync),
        fsparam_flag("noalign",         Opt_noalign),
        fsparam_flag("swalloc",         Opt_swalloc),
        fsparam_u32("sunit",            Opt_sunit),
        fsparam_u32("swidth",           Opt_swidth),
        fsparam_flag("nouuid",          Opt_nouuid),
        fsparam_flag("grpid",           Opt_grpid),
        fsparam_flag("nogrpid",         Opt_nogrpid),
        fsparam_flag("bsdgroups",       Opt_bsdgroups),
        fsparam_flag("sysvgroups",      Opt_sysvgroups),
        fsparam_string("allocsize",     Opt_allocsize),
        fsparam_flag("norecovery",      Opt_norecovery),
        fsparam_flag("inode64",         Opt_inode64),
        fsparam_flag("inode32",         Opt_inode32),
        fsparam_flag("ikeep",           Opt_ikeep),
        fsparam_flag("noikeep",         Opt_noikeep),
        fsparam_flag("largeio",         Opt_largeio),
        fsparam_flag("nolargeio",       Opt_nolargeio),
        fsparam_flag("attr2",           Opt_attr2),
        fsparam_flag("noattr2",         Opt_noattr2),
        fsparam_flag("filestreams",     Opt_filestreams),
        fsparam_flag("quota",           Opt_quota),
        fsparam_flag("noquota",         Opt_noquota),
        fsparam_flag("usrquota",        Opt_usrquota),
        fsparam_flag("grpquota",        Opt_grpquota),
        fsparam_flag("prjquota",        Opt_prjquota),
        fsparam_flag("uquota",          Opt_uquota),
        fsparam_flag("gquota",          Opt_gquota),
        fsparam_flag("pquota",          Opt_pquota),
        fsparam_flag("uqnoenforce",     Opt_uqnoenforce),
        fsparam_flag("gqnoenforce",     Opt_gqnoenforce),
        fsparam_flag("pqnoenforce",     Opt_pqnoenforce),
        fsparam_flag("qnoenforce",      Opt_qnoenforce),
        fsparam_flag("discard",         Opt_discard),
        fsparam_flag("nodiscard",       Opt_nodiscard),
        fsparam_flag("dax",             Opt_dax),
        {}
};

struct proc_xfs_info {
        uint64_t        flag;
        char            *str;
};

static int
xfs_fs_show_options(
        struct seq_file         *m,
        struct dentry           *root)
{
        static struct proc_xfs_info xfs_info_set[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_IKEEP,              ",ikeep" },
                { XFS_MOUNT_WSYNC,              ",wsync" },
                { XFS_MOUNT_NOALIGN,            ",noalign" },
                { XFS_MOUNT_SWALLOC,            ",swalloc" },
                { XFS_MOUNT_NOUUID,             ",nouuid" },
                { XFS_MOUNT_NORECOVERY,         ",norecovery" },
                { XFS_MOUNT_ATTR2,              ",attr2" },
                { XFS_MOUNT_FILESTREAMS,        ",filestreams" },
                { XFS_MOUNT_GRPID,              ",grpid" },
                { XFS_MOUNT_DISCARD,            ",discard" },
                { XFS_MOUNT_LARGEIO,            ",largeio" },
                { XFS_MOUNT_DAX,                ",dax" },
                { 0, NULL }
        };
        struct xfs_mount        *mp = XFS_M(root->d_sb);
        struct proc_xfs_info    *xfs_infop;

        for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
                if (mp->m_flags & xfs_infop->flag)
                        seq_puts(m, xfs_infop->str);
        }

        seq_printf(m, ",inode%d",
                   (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

        if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
                seq_printf(m, ",allocsize=%dk",
                           (1 << mp->m_allocsize_log) >> 10);

        if (mp->m_logbufs > 0)
                seq_printf(m, ",logbufs=%d", mp->m_logbufs);
        if (mp->m_logbsize > 0)
                seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

        if (mp->m_logname)
                seq_show_option(m, "logdev", mp->m_logname);
        if (mp->m_rtname)
                seq_show_option(m, "rtdev", mp->m_rtname);

        if (mp->m_dalign > 0)
                seq_printf(m, ",sunit=%d",
                           (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
        if (mp->m_swidth > 0)
                seq_printf(m, ",swidth=%d",
                           (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

        if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
                seq_puts(m, ",usrquota");
        else if (mp->m_qflags & XFS_UQUOTA_ACCT)
                seq_puts(m, ",uqnoenforce");

        if (mp->m_qflags & XFS_PQUOTA_ACCT) {
                if (mp->m_qflags & XFS_PQUOTA_ENFD)
                        seq_puts(m, ",prjquota");
                else
                        seq_puts(m, ",pqnoenforce");
        }
        if (mp->m_qflags & XFS_GQUOTA_ACCT) {
                if (mp->m_qflags & XFS_GQUOTA_ENFD)
                        seq_puts(m, ",grpquota");
                else
                        seq_puts(m, ",gqnoenforce");
        }

        if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
                seq_puts(m, ",noquota");

        return 0;
}

static uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /*
         * Figure out the maximum file size; on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_write_begin does this in an [unsigned] long long...
         *      page->index << (PAGE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         */

#if BITS_PER_LONG == 32
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_SIZE;
        bitshift = BITS_PER_LONG;
#endif

        return (((uint64_t)pagefactor) << bitshift) - 1;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
        struct xfs_mount *mp,
        xfs_agnumber_t  agcount)
{
        xfs_agnumber_t  index;
        xfs_agnumber_t  maxagi = 0;
        xfs_sb_t        *sbp = &mp->m_sb;
        xfs_agnumber_t  max_metadata;
        xfs_agino_t     agino;
        xfs_ino_t       ino;

        /*
         * Calculate how much should be reserved for inodes to meet
         * the max inode percentage.  Used only for inode32.
         */
        if (M_IGEO(mp)->maxicount) {
                uint64_t        icount;

                icount = sbp->sb_dblocks * sbp->sb_imax_pct;
                do_div(icount, 100);
                icount += sbp->sb_agblocks - 1;
                do_div(icount, sbp->sb_agblocks);
                max_metadata = icount;
        } else {
                max_metadata = agcount;
        }

        /* Get the last possible inode in the filesystem */
        agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
        ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

        /*
         * If user asked for no more than 32-bit inodes, and the fs is
         * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
         * the allocator to accommodate the request.
         */
        if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
                mp->m_flags |= XFS_MOUNT_32BITINODES;
        else
                mp->m_flags &= ~XFS_MOUNT_32BITINODES;

        for (index = 0; index < agcount; index++) {
                struct xfs_perag        *pag;

                ino = XFS_AGINO_TO_INO(mp, index, agino);

                pag = xfs_perag_get(mp, index);

                if (mp->m_flags & XFS_MOUNT_32BITINODES) {
                        if (ino > XFS_MAXINUMBER_32) {
                                pag->pagi_inodeok = 0;
                                pag->pagf_metadata = 0;
                        } else {
                                pag->pagi_inodeok = 1;
                                maxagi++;
                                if (index < max_metadata)
                                        pag->pagf_metadata = 1;
                                else
                                        pag->pagf_metadata = 0;
                        }
                } else {
                        pag->pagi_inodeok = 1;
                        pag->pagf_metadata = 0;
                }

                xfs_perag_put(pag);
        }

        return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}

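/*
 * Open an external log or realtime device by path, claiming it exclusively
 * on behalf of this mount (the xfs_mount is passed as the exclusive holder).
 */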
STATIC int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                    mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
        }

        return error;
}

STATIC void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}

STATIC void
xfs_close_devices(
        struct xfs_mount        *mp)
{
        struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
                struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

                xfs_free_buftarg(mp->m_logdev_targp);
                xfs_blkdev_put(logdev);
                fs_put_dax(dax_logdev);
        }
        if (mp->m_rtdev_targp) {
                struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
                struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

                xfs_free_buftarg(mp->m_rtdev_targp);
                xfs_blkdev_put(rtdev);
                fs_put_dax(dax_rtdev);
        }
        xfs_free_buftarg(mp->m_ddev_targp);
        fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *      (1) device (partition) with data and internal log
 *      (2) logical volume with data and log subvolumes.
 *      (3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
        struct xfs_mount        *mp)
{
        struct block_device     *ddev = mp->m_super->s_bdev;
        struct dax_device       *dax_ddev = fs_dax_get_by_bdev(ddev);
        struct dax_device       *dax_logdev = NULL, *dax_rtdev = NULL;
        struct block_device     *logdev = NULL, *rtdev = NULL;
        int                     error;

        /*
         * Open real time and log devices - order is important.
         */
        if (mp->m_logname) {
                error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
                if (error)
                        goto out;
                dax_logdev = fs_dax_get_by_bdev(logdev);
        }

        if (mp->m_rtname) {
                error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
                if (error)
                        goto out_close_logdev;

                if (rtdev == ddev || rtdev == logdev) {
                        xfs_warn(mp,
        "Cannot mount filesystem with identical rtdev and ddev/logdev.");
                        error = -EINVAL;
                        goto out_close_rtdev;
                }
                dax_rtdev = fs_dax_get_by_bdev(rtdev);
        }

        /*
         * Setup xfs_mount buffer target pointers
         */
        error = -ENOMEM;
        mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
        if (!mp->m_ddev_targp)
                goto out_close_rtdev;

        if (rtdev) {
                mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
                if (!mp->m_rtdev_targp)
                        goto out_free_ddev_targ;
        }

        if (logdev && logdev != ddev) {
                mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
                if (!mp->m_logdev_targp)
                        goto out_free_rtdev_targ;
        } else {
                mp->m_logdev_targp = mp->m_ddev_targp;
        }

        return 0;

 out_free_rtdev_targ:
        if (mp->m_rtdev_targp)
                xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
        xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
        xfs_blkdev_put(rtdev);
        fs_put_dax(dax_rtdev);
 out_close_logdev:
        if (logdev && logdev != ddev) {
                xfs_blkdev_put(logdev);
                fs_put_dax(dax_logdev);
        }
 out:
        fs_put_dax(dax_ddev);
        return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
        struct xfs_mount        *mp)
{
        int                     error;

        error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
        if (error)
                return error;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                unsigned int    log_sector_size = BBSIZE;

                if (xfs_sb_version_hassector(&mp->m_sb))
                        log_sector_size = mp->m_sb.sb_logsectsize;
                error = xfs_setsize_buftarg(mp->m_logdev_targp,
                                            log_sector_size);
                if (error)
                        return error;
        }
        if (mp->m_rtdev_targp) {
                error = xfs_setsize_buftarg(mp->m_rtdev_targp,
                                            mp->m_sb.sb_sectsize);
                if (error)
                        return error;
        }

        return 0;
}

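/*
 * Create the per-mount workqueues.  All of them are WQ_FREEZABLE so that
 * their work items are drained by the system freezer, and most are also
 * WQ_MEM_RECLAIM because they may need to make forward progress under
 * memory pressure.  The "%s" in each name is filled with the superblock's
 * s_id (typically the device name) so queues from different mounts can be
 * told apart.
 */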
STATIC int
xfs_init_mount_workqueues(
        struct xfs_mount        *mp)
{
        mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
        if (!mp->m_buf_workqueue)
                goto out;

        mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_unwritten_workqueue)
                goto out_destroy_buf;

        mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
                        WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
                        0, mp->m_super->s_id);
        if (!mp->m_cil_workqueue)
                goto out_destroy_unwritten;

        mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_reclaim_workqueue)
                goto out_destroy_cil;

        mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_eofblocks_workqueue)
                goto out_destroy_reclaim;

        mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
                                               mp->m_super->s_id);
        if (!mp->m_sync_workqueue)
                goto out_destroy_eofb;

        return 0;

out_destroy_eofb:
        destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
        destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
        destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
        destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
        destroy_workqueue(mp->m_buf_workqueue);
out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
        struct xfs_mount        *mp)
{
        destroy_workqueue(mp->m_sync_workqueue);
        destroy_workqueue(mp->m_eofblocks_workqueue);
        destroy_workqueue(mp->m_reclaim_workqueue);
        destroy_workqueue(mp->m_cil_workqueue);
        destroy_workqueue(mp->m_unwritten_workqueue);
        destroy_workqueue(mp->m_buf_workqueue);
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
        struct xfs_mount        *mp)
{
        struct super_block      *sb = mp->m_super;

        if (down_read_trylock(&sb->s_umount)) {
                sync_inodes_sb(sb);
                up_read(&sb->s_umount);
        }
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        BUG();
        return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
        struct xfs_inode        *ip,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;

        if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
                return;
        do {
                if (isnullstartblock(got.br_startblock)) {
                        xfs_warn(ip->i_mount,
        "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
                                ip->i_ino,
                                whichfork == XFS_DATA_FORK ? "data" : "cow",
                                got.br_startoff, got.br_blockcount);
                }
        } while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)       do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        trace_xfs_destroy_inode(ip);

        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        XFS_STATS_INC(ip->i_mount, vn_rele);
        XFS_STATS_INC(ip->i_mount, vn_remove);

        xfs_inactive(ip);

        if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
                xfs_check_delalloc(ip, XFS_DATA_FORK);
                xfs_check_delalloc(ip, XFS_COW_FORK);
                ASSERT(0);
        }

        XFS_STATS_INC(ip->i_mount, vn_reclaim);

        /*
         * We should never get here with one of the reclaim flags already set.
         */
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

        /*
         * We always use background reclaim here because even if the
         * inode is clean, it still may be under IO and hence we have
         * to take the flush lock. The background reclaim path handles
         * this more efficiently than we can here, so simply let background
         * reclaim tear down all inodes.
         */
        xfs_inode_set_reclaim_tag(ip);
}

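/*
 * On lazytime mounts the VFS defers pure timestamp updates.  When it finally
 * signals an I_DIRTY_SYNC event for an inode whose timestamps are still in
 * I_DIRTY_TIME state, log a timestamp update transaction here so that the
 * cached timestamps become persistent.
 */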
static void
xfs_fs_dirty_inode(
        struct inode                    *inode,
        int                             flag)
{
        struct xfs_inode                *ip = XFS_I(inode);
        struct xfs_mount                *mp = ip->i_mount;
        struct xfs_trans                *tp;

        if (!(inode->i_sb->s_flags & SB_LAZYTIME))
                return;
        if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
                return;

        if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
                return;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
        xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
        void                    *inode)
{
        struct xfs_inode        *ip = inode;

        memset(ip, 0, sizeof(struct xfs_inode));

        /* vfs inode */
        inode_init_once(VFS_I(ip));

        /* xfs inode */
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);

        mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                    "xfsino", ip->i_ino);
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                    "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        /*
         * If this unlinked inode is in the middle of recovery, don't
         * drop the inode just yet; log recovery will take care of
         * that.  See the comment for this inode flag.
         */
        if (ip->i_flags & XFS_IRECOVERY) {
                ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
                return 0;
        }

        return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

static void
xfs_mount_free(
        struct xfs_mount        *mp)
{
        kfree(mp->m_rtname);
        kfree(mp->m_logname);
        kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
        struct super_block      *sb,
        int                     wait)
{
        struct xfs_mount        *mp = XFS_M(sb);

        /*
         * Doing anything during the async pass would be counterproductive.
         */
        if (!wait)
                return 0;

        xfs_log_force(mp, XFS_LOG_SYNC);
        if (laptop_mode) {
                /*
                 * The disk must be active because we're syncing.
                 * We schedule log work now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                flush_delayed_work(&mp->m_log->l_work);
        }

        return 0;
}

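/*
 * Fill in a kstatfs for statfs(2).  The inode and free block counts are
 * summed from the lockless per-cpu counters, so under concurrent activity
 * the values reported here may be slightly stale.
 */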
STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        struct xfs_mount        *mp = XFS_M(dentry->d_sb);
        xfs_sb_t                *sbp = &mp->m_sb;
        struct xfs_inode        *ip = XFS_I(d_inode(dentry));
        uint64_t                fakeinos, id;
        uint64_t                icount;
        uint64_t                ifree;
        uint64_t                fdblocks;
        xfs_extlen_t            lsize;
        int64_t                 ffree;

        statp->f_type = XFS_SUPER_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;

        id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);

        icount = percpu_counter_sum(&mp->m_icount);
        ifree = percpu_counter_sum(&mp->m_ifree);
        fdblocks = percpu_counter_sum(&mp->m_fdblocks);

        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
        lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
        statp->f_blocks = sbp->sb_dblocks - lsize;
        spin_unlock(&mp->m_sb_lock);

        statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
        statp->f_bavail = statp->f_bfree;

        fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
        statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
        if (M_IGEO(mp)->maxicount)
                statp->f_files = min_t(typeof(statp->f_files),
                                        statp->f_files,
                                        M_IGEO(mp)->maxicount);

        /* If sb_icount overshot maxicount, report actual allocation */
        statp->f_files = max_t(typeof(statp->f_files),
                               statp->f_files,
                               sbp->sb_icount);

        /* make sure statp->f_ffree does not underflow */
        ffree = statp->f_files - (icount - ifree);
        statp->f_ffree = max_t(int64_t, ffree, 0);

        if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
            ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
                              (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
                xfs_qm_statvfs(ip, statp);

        if (XFS_IS_REALTIME_MOUNT(mp) &&
            (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
                statp->f_blocks = sbp->sb_rblocks;
                statp->f_bavail = statp->f_bfree =
                        sbp->sb_frextents * sbp->sb_rextsize;
        }

        return 0;
}

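/*
 * Empty the reserved block pool across a freeze, returning its blocks to
 * the free space counters and remembering the pool size so that
 * xfs_restore_resvblks() can re-establish the reservation (or the default)
 * on thaw.
 */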
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
        uint64_t resblks = 0;

        mp->m_resblks_save = mp->m_resblks;
        xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
        uint64_t resblks;

        if (mp->m_resblks_save) {
                resblks = mp->m_resblks_save;
                mp->m_resblks_save = 0;
        } else
                resblks = xfs_default_resblks(mp);

        xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
        struct xfs_mount        *mp)
{
        int     error = 0;

        /* wait for all modifications to complete */
        while (atomic_read(&mp->m_active_trans) > 0)
                delay(100);

        /* force the log to unpin objects from the now complete transactions */
        xfs_log_force(mp, XFS_LOG_SYNC);

        /* reclaim inodes to do any IO before the freeze completes */
        xfs_reclaim_inodes(mp, 0);
        xfs_reclaim_inodes(mp, SYNC_WAIT);

        /* Push the superblock and write an unmount record */
        error = xfs_log_sbcount(mp);
        if (error)
                xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
                                "Frozen image may not be consistent.");
        /*
         * Just warn here until the VFS can correctly support
         * read-only remount without racing.
         */
        WARN_ON(atomic_read(&mp->m_active_trans) != 0);

        xfs_log_quiesce(mp);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_stop_block_reaping(mp);
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
        return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_restore_resvblks(mp);
        xfs_log_work_queue(mp);
        xfs_start_block_reaping(mp);
        return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
        struct xfs_mount        *mp)
{
        int                     ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

        /* Fail a mount where the logbuf is smaller than the log stripe */
        if (xfs_sb_version_haslogv2(&mp->m_sb)) {
                if (mp->m_logbsize <= 0 &&
                    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
                        mp->m_logbsize = mp->m_sb.sb_logsunit;
                } else if (mp->m_logbsize > 0 &&
                           mp->m_logbsize < mp->m_sb.sb_logsunit) {
                        xfs_warn(mp,
                "logbuf size must be greater than or equal to log stripe size");
                        return -EINVAL;
                }
        } else {
                /* Fail a mount if the logbuf is larger than 32K */
                if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
                        xfs_warn(mp,
                "logbuf size for version 1 logs must be 16K or 32K");
                        return -EINVAL;
                }
        }

        /*
         * V5 filesystems always use attr2 format for attributes.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
                             "attr2 is always enabled for V5 filesystems.");
                return -EINVAL;
        }

        /*
         * mkfs'ed attr2 will turn on attr2 mount unless explicitly
         * told by noattr2 to turn it off
         */
        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            !(mp->m_flags & XFS_MOUNT_NOATTR2))
                mp->m_flags |= XFS_MOUNT_ATTR2;

        /*
         * prohibit r/w mounts of read-only filesystems
         */
        if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
                xfs_warn(mp,
                        "cannot mount a read-only filesystem as read-write");
                return -EROFS;
        }

        if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
            (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
            !xfs_sb_version_has_pquotino(&mp->m_sb)) {
                xfs_warn(mp,
                  "Super block does not support project and group quota together");
                return -EINVAL;
        }

        return 0;
}

static int
xfs_init_percpu_counters(
        struct xfs_mount        *mp)
{
        int             error;

        error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
        if (error)
                return -ENOMEM;

        error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
        if (error)
                goto free_icount;

        error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
        if (error)
                goto free_ifree;

        error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
        if (error)
                goto free_fdblocks;

        return 0;

free_fdblocks:
        percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
        percpu_counter_destroy(&mp->m_ifree);
free_icount:
        percpu_counter_destroy(&mp->m_icount);
        return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
        struct xfs_mount        *mp)
{
        percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
        percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
        percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
        struct xfs_mount        *mp)
{
        percpu_counter_destroy(&mp->m_icount);
        percpu_counter_destroy(&mp->m_ifree);
        percpu_counter_destroy(&mp->m_fdblocks);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               percpu_counter_sum(&mp->m_delalloc_blks) == 0);
        percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        /* if ->fill_super failed, we have no mount to tear down */
        if (!sb->s_fs_info)
                return;

        xfs_notice(mp, "Unmounting Filesystem");
        xfs_filestream_unmount(mp);
        xfs_unmountfs(mp);

        xfs_freesb(mp);
        free_percpu(mp->m_stats.xs_stats);
        xfs_destroy_percpu_counters(mp);
        xfs_destroy_mount_workqueues(mp);
        xfs_close_devices(mp);

        sb->s_fs_info = NULL;
        xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
        struct super_block      *sb,
        struct shrink_control   *sc)
{
        /* Paranoia: catch incorrect calls during mount setup or teardown */
        if (WARN_ON_ONCE(!sb->s_fs_info))
                return 0;
        return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
        struct super_block      *sb,
        struct shrink_control   *sc)
{
        return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .dirty_inode            = xfs_fs_dirty_inode,
        .drop_inode             = xfs_fs_drop_inode,
        .put_super              = xfs_fs_put_super,
        .sync_fs                = xfs_fs_sync_fs,
        .freeze_fs              = xfs_fs_freeze,
        .unfreeze_fs            = xfs_fs_unfreeze,
        .statfs                 = xfs_fs_statfs,
        .show_options           = xfs_fs_show_options,
        .nr_cached_objects      = xfs_fs_nr_cached_objects,
        .free_cached_objects    = xfs_fs_free_cached_objects,
};

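/*
 * Parse an integer mount option value with an optional K/M/G binary suffix:
 * the suffix is stripped and the parsed value is shifted left by 10, 20 or
 * 30 bits accordingly, so e.g. "logbsize=256k" yields 262144.
 */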
static int
suffix_kstrtoint(
        const char      *s,
        unsigned int    base,
        int             *res)
{
        int             last, shift_left_factor = 0, _res;
        char            *value;
        int             ret = 0;

        value = kstrdup(s, GFP_KERNEL);
        if (!value)
                return -ENOMEM;

        last = strlen(value) - 1;
        if (value[last] == 'K' || value[last] == 'k') {
                shift_left_factor = 10;
                value[last] = '\0';
        }
        if (value[last] == 'M' || value[last] == 'm') {
                shift_left_factor = 20;
                value[last] = '\0';
        }
        if (value[last] == 'G' || value[last] == 'g') {
                shift_left_factor = 30;
                value[last] = '\0';
        }

        if (kstrtoint(value, base, &_res))
                ret = -EINVAL;
        kfree(value);
        *res = _res << shift_left_factor;
        return ret;
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fc_parse_param(
        struct fs_context       *fc,
        struct fs_parameter     *param)
{
        struct xfs_mount        *mp = fc->s_fs_info;
        struct fs_parse_result  result;
        int                     size = 0;
        int                     opt;

        opt = fs_parse(fc, xfs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_logbufs:
                mp->m_logbufs = result.uint_32;
                return 0;
        case Opt_logbsize:
                if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
                        return -EINVAL;
                return 0;
        case Opt_logdev:
                kfree(mp->m_logname);
                mp->m_logname = kstrdup(param->string, GFP_KERNEL);
                if (!mp->m_logname)
                        return -ENOMEM;
                return 0;
        case Opt_rtdev:
                kfree(mp->m_rtname);
                mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
                if (!mp->m_rtname)
                        return -ENOMEM;
                return 0;
        case Opt_allocsize:
                if (suffix_kstrtoint(param->string, 10, &size))
                        return -EINVAL;
                mp->m_allocsize_log = ffs(size) - 1;
                mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
                return 0;
        case Opt_grpid:
        case Opt_bsdgroups:
                mp->m_flags |= XFS_MOUNT_GRPID;
                return 0;
        case Opt_nogrpid:
        case Opt_sysvgroups:
                mp->m_flags &= ~XFS_MOUNT_GRPID;
                return 0;
        case Opt_wsync:
                mp->m_flags |= XFS_MOUNT_WSYNC;
                return 0;
        case Opt_norecovery:
                mp->m_flags |= XFS_MOUNT_NORECOVERY;
                return 0;
        case Opt_noalign:
                mp->m_flags |= XFS_MOUNT_NOALIGN;
                return 0;
        case Opt_swalloc:
                mp->m_flags |= XFS_MOUNT_SWALLOC;
                return 0;
        case Opt_sunit:
                mp->m_dalign = result.uint_32;
                return 0;
        case Opt_swidth:
                mp->m_swidth = result.uint_32;
                return 0;
        case Opt_inode32:
                mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
                return 0;
        case Opt_inode64:
                mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
                return 0;
        case Opt_nouuid:
                mp->m_flags |= XFS_MOUNT_NOUUID;
                return 0;
        case Opt_ikeep:
                mp->m_flags |= XFS_MOUNT_IKEEP;
                return 0;
        case Opt_noikeep:
                mp->m_flags &= ~XFS_MOUNT_IKEEP;
                return 0;
        case Opt_largeio:
                mp->m_flags |= XFS_MOUNT_LARGEIO;
                return 0;
        case Opt_nolargeio:
                mp->m_flags &= ~XFS_MOUNT_LARGEIO;
                return 0;
        case Opt_attr2:
                mp->m_flags |= XFS_MOUNT_ATTR2;
                return 0;
        case Opt_noattr2:
                mp->m_flags &= ~XFS_MOUNT_ATTR2;
                mp->m_flags |= XFS_MOUNT_NOATTR2;
                return 0;
        case Opt_filestreams:
                mp->m_flags |= XFS_MOUNT_FILESTREAMS;
                return 0;
        case Opt_noquota:
                mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
                mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
                mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
                return 0;
        case Opt_quota:
        case Opt_uquota:
        case Opt_usrquota:
                mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
                                 XFS_UQUOTA_ENFD);
                return 0;
        case Opt_qnoenforce:
        case Opt_uqnoenforce:
                mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_UQUOTA_ENFD;
                return 0;
        case Opt_pquota:
        case Opt_prjquota:
                mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
                                 XFS_PQUOTA_ENFD);
                return 0;
        case Opt_pqnoenforce:
                mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_PQUOTA_ENFD;
                return 0;
        case Opt_gquota:
        case Opt_grpquota:
                mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
                                 XFS_GQUOTA_ENFD);
                return 0;
        case Opt_gqnoenforce:
                mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_GQUOTA_ENFD;
                return 0;
        case Opt_discard:
                mp->m_flags |= XFS_MOUNT_DISCARD;
                return 0;
        case Opt_nodiscard:
                mp->m_flags &= ~XFS_MOUNT_DISCARD;
                return 0;
#ifdef CONFIG_FS_DAX
        case Opt_dax:
                mp->m_flags |= XFS_MOUNT_DAX;
                return 0;
#endif
        default:
                xfs_warn(mp, "unknown mount option [%s].", param->key);
                return -EINVAL;
        }

        return 0;
}

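/*
 * Cross-check the assembled mount options once they have all been parsed,
 * rejecting combinations that cannot work together (e.g. norecovery without
 * a read-only mount, or sunit without swidth) and range-checking the log
 * buffer and allocation size values.
 */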
static int
xfs_fc_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * The norecovery option requires a read-only mount.
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k, 32k, 64k, 128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid allocsize log2 value: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

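/*
 * The fs_context fill_super callback, invoked by get_tree_bdev() once the
 * block device has been opened. Wire up the VFS superblock operations, open
 * the data/log/realtime devices, read and verify the on-disk superblock, and
 * then run the full mount sequence. Failures unwind in reverse order through
 * the labels at the bottom of the function.
 */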
static int
xfs_fc_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fc_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * We must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	sb->s_time_min = S32_MIN;
	sb->s_time_max = S32_MAX;
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
				"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fc_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fc_fill_super);
}

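/*
 * Handle the ro->rw half of a remount (e.g. "mount -o remount,rw <mnt>"):
 * reject norecovery mounts and filesystems with unknown ro-compat features,
 * write back any pending superblock changes, refill the reserve block pool,
 * restart log work and background reaping, and recover leftover CoW extents.
 */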
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_start_block_reaping(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}

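/*
 * Handle the rw->ro half of a remount: stop the background scanners,
 * release CoW and per-AG reservations, stash the reserve pool size, and
 * quiesce the filesystem before setting XFS_MOUNT_RDONLY.
 */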
static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_stop_block_reaping(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_icache_free_cowblocks(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_quiesce_attr(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fc_reconfigure(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	struct xfs_sb		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	error = xfs_fc_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void
xfs_fc_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

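/*
 * fs_context lifecycle: xfs_init_fs_context() allocates the xfs_mount and
 * installs these operations; .parse_param runs once per mount option,
 * .get_tree opens the block device and fills in the superblock at mount
 * time, .reconfigure handles remount, and .free releases the xfs_mount if
 * it was never transferred to a superblock.
 */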
static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fc_parse_param,
	.get_tree    = xfs_fc_get_tree,
	.reconfigure = xfs_fc_reconfigure,
	.free        = xfs_fc_free,
};

static int
xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");

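/*
 * Set up all of the slab caches (zones) used by XFS. Allocation failures
 * unwind in strict reverse order through the goto ladder below so that a
 * partial initialisation never leaks a cache.
 */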
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_MEM_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

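/*
 * Module initialisation: bring up the slab caches, workqueues, buffer
 * cache, procfs/sysctl/sysfs interfaces and quota support, then register
 * the filesystem type. Each failure unwinds everything set up before it.
 */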
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");