// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

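/*
 * DAX access policy for this mount.  "inode" (the default) defers to the
 * per-inode DAX flag, "always" forces DAX on for all regular files, and
 * "never" disables DAX completely.
 */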
enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

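/*
 * Maps a feature flag in mp->m_features to the mount option string that
 * xfs_fs_show_options() emits for it.
 */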
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (xfs_is_inode32(mp)) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

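/*
 * Validate a dax=always request against the block devices and features.
 * DAX is quietly turned off if no capable device is present or the block
 * size is not PAGE_SIZE, but combining DAX with reflink is a hard error.
 */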
static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp)) {
		xfs_alert(mp, "DAX and reflink cannot be used together!");
		return -EINVAL;
	}

	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

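/*
 * Free the buftargs and close the log and realtime block devices.  The data
 * device itself is not closed here; it was opened by the VFS along with the
 * superblock and is released with it.
 */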
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

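/*
 * Allocate the per-mount workqueues: buffer completion, unwritten extent
 * conversion, inode reclaim, block and inode garbage collection, and
 * background sync.  On failure, tear down in reverse order of allocation.
 */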
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

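/*
 * Called when the VFS dirties an inode.  On a lazytime mount, timestamp
 * updates stay in core until the VFS expires them (I_DIRTY_SYNC on an
 * inode that is still I_DIRTY_TIME); log the timestamps at that point so
 * the update makes it to disk.
 */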
static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialise state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/* Wait for whatever inactivations are in progress. */
	xfs_inodegc_flush(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

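/*
 * Stash the reserved block pool while the filesystem is quiesced (the
 * freeze path uses these helpers), returning the reservation to the free
 * pool, and re-establish it - or the default - when going writable again.
 */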
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

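/*
 * Set up the per-cpu counters backing the free space and inode accounting:
 * allocated inodes, free inodes, free data blocks, and outstanding
 * delalloc reservations.
 */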
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

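/*
 * Allocate and initialise the per-cpu structures used to queue inodes for
 * background inactivation by the inodegc workers.
 */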
static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

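/*
 * Parse an integer mount option value with an optional K/M/G binary
 * suffix, e.g. logbsize=64k yields 65536.
 */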
static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
Ian Kent8757c382019-11-04 13:58:48 -08001307 return 0;
1308 case Opt_discard:
Dave Chinner0560f312021-08-18 18:46:52 -07001309 parsing_mp->m_features |= XFS_FEAT_DISCARD;
Ian Kent8757c382019-11-04 13:58:48 -08001310 return 0;
1311 case Opt_nodiscard:
Dave Chinner0560f312021-08-18 18:46:52 -07001312 parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
Ian Kent8757c382019-11-04 13:58:48 -08001313 return 0;
1314#ifdef CONFIG_FS_DAX
1315 case Opt_dax:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001316 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
Ira Weiny8d6c3442020-05-04 09:02:42 -07001317 return 0;
1318 case Opt_dax_enum:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001319 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
Ian Kent8757c382019-11-04 13:58:48 -08001320 return 0;
1321#endif
Pavel Reichlc23c3932020-09-25 11:10:29 -07001322	/* The following mount options will be removed in September 2025 */
1323 case Opt_ikeep:
Dave Chinner0560f312021-08-18 18:46:52 -07001324 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1325 parsing_mp->m_features |= XFS_FEAT_IKEEP;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001326 return 0;
1327 case Opt_noikeep:
Dave Chinner0560f312021-08-18 18:46:52 -07001328 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1329 parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001330 return 0;
1331 case Opt_attr2:
Dave Chinner0560f312021-08-18 18:46:52 -07001332 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1333 parsing_mp->m_features |= XFS_FEAT_ATTR2;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001334 return 0;
1335 case Opt_noattr2:
Dave Chinner0560f312021-08-18 18:46:52 -07001336 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1337 parsing_mp->m_features |= XFS_FEAT_NOATTR2;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001338 return 0;
Ian Kent8757c382019-11-04 13:58:48 -08001339 default:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001340 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
Ian Kent8757c382019-11-04 13:58:48 -08001341 return -EINVAL;
1342 }
1343
1344 return 0;
1345}
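
/*
 * Worked example (editorial sketch; the option values are illustrative):
 * mounting with "-o logbufs=8,logbsize=256k,discard" results in three
 * calls into the switch above:
 *
 *	Opt_logbufs:	parsing_mp->m_logbufs = 8
 *	Opt_logbsize:	suffix_kstrtoint("256k", ...) -> m_logbsize = 262144
 *	Opt_discard:	parsing_mp->m_features |= XFS_FEAT_DISCARD
 *
 * Cross-option consistency is deliberately not checked here; that is
 * deferred to xfs_fs_validate_params() below.
 */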
1346
1347static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001348xfs_fs_validate_params(
Ian Kent8757c382019-11-04 13:58:48 -08001349 struct xfs_mount *mp)
1350{
Dave Chinner0560f312021-08-18 18:46:52 -07001351	/* The norecovery option requires a read-only mount */
Dave Chinner2e973b22021-08-18 18:46:52 -07001352 if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
Ian Kent8757c382019-11-04 13:58:48 -08001353 xfs_warn(mp, "no-recovery mounts must be read-only.");
1354 return -EINVAL;
1355 }
1356
Dave Chinner0560f312021-08-18 18:46:52 -07001357 /*
1358 * We have not read the superblock at this point, so only the attr2
1359 * mount option can set the attr2 feature by this stage.
1360 */
1361 if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
Dave Chinnere23b55d2021-08-18 18:46:25 -07001362 xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1363 return -EINVAL;
1364 }
1365
Dave Chinner0560f312021-08-18 18:46:52 -07001367 if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
Ian Kent8757c382019-11-04 13:58:48 -08001368 xfs_warn(mp,
1369 "sunit and swidth options incompatible with the noalign option");
1370 return -EINVAL;
1371 }
1372
1373 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1374 xfs_warn(mp, "quota support not available in this kernel.");
1375 return -EINVAL;
1376 }
1377
1378 if ((mp->m_dalign && !mp->m_swidth) ||
1379 (!mp->m_dalign && mp->m_swidth)) {
1380 xfs_warn(mp, "sunit and swidth must be specified together");
1381 return -EINVAL;
1382 }
1383
1384 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1385 xfs_warn(mp,
1386 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1387 mp->m_swidth, mp->m_dalign);
1388 return -EINVAL;
1389 }
1390
1391 if (mp->m_logbufs != -1 &&
1392 mp->m_logbufs != 0 &&
1393 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1394 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1395 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1396 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1397 return -EINVAL;
1398 }
1399
1400 if (mp->m_logbsize != -1 &&
1401 mp->m_logbsize != 0 &&
1402 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1403 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1404 !is_power_of_2(mp->m_logbsize))) {
1405 xfs_warn(mp,
1406 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1407 mp->m_logbsize);
1408 return -EINVAL;
1409 }
1410
Dave Chinner0560f312021-08-18 18:46:52 -07001411 if (xfs_has_allocsize(mp) &&
Ian Kent8757c382019-11-04 13:58:48 -08001412 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1413 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1414 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1415 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1416 return -EINVAL;
1417 }
1418
1419 return 0;
1420}
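
/*
 * Examples of combinations the checks above reject (editorial sketch,
 * assuming XLOG_MIN_ICLOGS is 2):
 *
 *	-o norecovery		without -o ro		-> -EINVAL
 *	-o sunit=128		without swidth		-> -EINVAL
 *	-o sunit=128,swidth=100	(100 % 128 != 0)	-> -EINVAL
 *	-o logbufs=1		(below XLOG_MIN_ICLOGS)	-> -EINVAL
 */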
1421
1422static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001423xfs_fs_fill_super(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 struct super_block *sb,
Ian Kent73e5fff2019-11-04 13:58:46 -08001425 struct fs_context *fc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426{
Ian Kent73e5fff2019-11-04 13:58:46 -08001427 struct xfs_mount *mp = sb->s_fs_info;
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001428 struct inode *root;
Colin Ian King0279c712019-11-06 08:07:46 -08001429 int flags = 0, error;
Christoph Hellwigbdd907b2008-05-20 15:10:44 +10001430
Ian Kent7c89fcb2019-11-04 13:58:46 -08001431 mp->m_super = sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001433 error = xfs_fs_validate_params(mp);
Christoph Hellwig745f6912007-08-30 17:20:39 +10001434 if (error)
Ian Kente1d3d212019-11-04 13:58:40 -08001435 goto out_free_names;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
1437 sb_min_blocksize(sb, BBSIZE);
Lachlan McIlroy0ec58512008-06-23 13:23:01 +10001438 sb->s_xattr = xfs_xattr_handlers;
Nathan Scotta50cd262006-03-14 14:06:18 +11001439 sb->s_export_op = &xfs_export_operations;
Christoph Hellwigfcafb712009-02-09 08:47:34 +01001440#ifdef CONFIG_XFS_QUOTA
Nathan Scotta50cd262006-03-14 14:06:18 +11001441 sb->s_qcop = &xfs_quotactl_operations;
Jan Kara17ef4fd2014-09-30 22:35:33 +02001442 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
Christoph Hellwigfcafb712009-02-09 08:47:34 +01001443#endif
Nathan Scotta50cd262006-03-14 14:06:18 +11001444 sb->s_op = &xfs_super_operations;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
Dave Chinnerdae5cd82018-05-10 21:50:23 -07001446 /*
1447	 * Delay mount work if the debug hook is set. This is debug
1448	 * instrumentation to coordinate simulation of xfs mount failures with
1449	 * VFS superblock operations.
1450 */
1451 if (xfs_globals.mount_delay) {
1452 xfs_notice(mp, "Delaying mount for %d seconds.",
1453 xfs_globals.mount_delay);
1454 msleep(xfs_globals.mount_delay * 1000);
1455 }
1456
Ian Kent73e5fff2019-11-04 13:58:46 -08001457 if (fc->sb_flags & SB_SILENT)
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001458 flags |= XFS_MFSI_QUIET;
1459
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001460 error = xfs_open_devices(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001461 if (error)
Ian Kente1d3d212019-11-04 13:58:40 -08001462 goto out_free_names;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001463
Dave Chinner24513372014-06-25 14:58:08 +10001464 error = xfs_init_mount_workqueues(mp);
Christoph Hellwig61ba35d2010-09-30 02:25:54 +00001465 if (error)
1466 goto out_close_devices;
Christoph Hellwigc962fb72008-05-20 15:10:52 +10001467
Dave Chinner5681ca42015-02-23 21:22:31 +11001468 error = xfs_init_percpu_counters(mp);
Christoph Hellwigaa6bf012012-02-29 09:53:48 +00001469 if (error)
1470 goto out_destroy_workqueues;
1471
Dave Chinnerab23a772021-08-06 11:05:39 -07001472 error = xfs_inodegc_init_percpu(mp);
1473 if (error)
1474 goto out_destroy_counters;
1475
Dave Chinner0ed17f02021-08-06 11:05:38 -07001476 /*
1477 * All percpu data structures requiring cleanup when a cpu goes offline
1478 * must be allocated before adding this @mp to the cpu-dead handler's
1479 * mount list.
1480 */
1481 xfs_mount_list_add(mp);
1482
Bill O'Donnell225e4632015-10-12 18:21:19 +11001483 /* Allocate stats memory before we do operations that might use it */
1484 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1485 if (!mp->m_stats.xs_stats) {
Dan Carpenterf9d460b2015-10-19 08:42:47 +11001486 error = -ENOMEM;
Dave Chinnerab23a772021-08-06 11:05:39 -07001487 goto out_destroy_inodegc;
Bill O'Donnell225e4632015-10-12 18:21:19 +11001488 }
1489
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001490 error = xfs_readsb(mp, flags);
1491 if (error)
Bill O'Donnell225e4632015-10-12 18:21:19 +11001492 goto out_free_stats;
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001493
1494 error = xfs_finish_flags(mp);
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001495 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001496 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001497
Christoph Hellwige34b5622008-05-20 15:10:36 +10001498 error = xfs_setup_devices(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001499 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001500 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001501
Darrick J. Wongb96cb832020-09-10 10:57:17 -07001502 /* V4 support is undergoing deprecation. */
Dave Chinner38c26bf2021-08-18 18:46:37 -07001503 if (!xfs_has_crc(mp)) {
Darrick J. Wongb96cb832020-09-10 10:57:17 -07001504#ifdef CONFIG_XFS_SUPPORT_V4
1505 xfs_warn_once(mp,
1506 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1507#else
1508 xfs_warn(mp,
1509 "Deprecated V4 format (crc=0) not supported by kernel.");
1510 error = -EINVAL;
1511 goto out_free_sb;
1512#endif
1513 }
1514
Darrick J. Wong80c720b2020-11-24 11:45:55 -08001515 /* Filesystem claims it needs repair, so refuse the mount. */
Dave Chinnerebd90272021-08-18 18:46:55 -07001516 if (xfs_has_needsrepair(mp)) {
Darrick J. Wong80c720b2020-11-24 11:45:55 -08001517 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1518 error = -EFSCORRUPTED;
1519 goto out_free_sb;
1520 }
1521
Darrick J. Wong932befe2020-01-02 13:20:13 -08001522 /*
Darrick J. Wong3945ae02020-11-24 11:45:54 -08001523 * Don't touch the filesystem if a user tool thinks it owns the primary
1524 * superblock. mkfs doesn't clear the flag from secondary supers, so
1525 * we don't check them at all.
1526 */
1527 if (mp->m_sb.sb_inprogress) {
1528 xfs_warn(mp, "Offline file system operation in progress!");
1529 error = -EFSCORRUPTED;
1530 goto out_free_sb;
1531 }
1532
1533 /*
1534	 * Until this is fixed, only page-sized or smaller data blocks work.
1535 */
1536 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1537 xfs_warn(mp,
1538 "File system with blocksize %d bytes. "
1539 "Only pagesize (%ld) or less will currently work.",
1540 mp->m_sb.sb_blocksize, PAGE_SIZE);
1541 error = -ENOSYS;
1542 goto out_free_sb;
1543 }
1544
1545 /* Ensure this filesystem fits in the page cache limits */
1546 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1547 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1548 xfs_warn(mp,
1549 "file system too large to be mounted on this system.");
1550 error = -EFBIG;
1551 goto out_free_sb;
1552 }
1553
1554 /*
Darrick J. Wong932befe2020-01-02 13:20:13 -08001555 * XFS block mappings use 54 bits to store the logical block offset.
1556 * This should suffice to handle the maximum file size that the VFS
1557 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1558 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1559 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1560 * to check this assertion.
1561 *
1562 * Avoid integer overflow by comparing the maximum bmbt offset to the
1563 * maximum pagecache offset in units of fs blocks.
1564 */
Darrick J. Wong33005fd2020-12-04 13:28:35 -08001565 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
Darrick J. Wong932befe2020-01-02 13:20:13 -08001566 xfs_warn(mp,
1567"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1568 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1569 XFS_MAX_FILEOFF);
1570 error = -EINVAL;
1571 goto out_free_sb;
1572 }
1573
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001574 error = xfs_filestream_mount(mp);
1575 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001576 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001577
Dave Chinner704b2902011-03-26 09:14:57 +11001578 /*
1579	 * We must configure the block size in the superblock before we run the
1580	 * full mount process, as the mount process can look up and cache inodes.
Dave Chinner704b2902011-03-26 09:14:57 +11001581 */
Adam Borowskidddde682018-10-18 17:20:19 +11001582 sb->s_magic = XFS_SUPER_MAGIC;
Christoph Hellwig4ca488e2007-10-11 18:09:40 +10001583 sb->s_blocksize = mp->m_sb.sb_blocksize;
1584 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
Darrick J. Wong932befe2020-01-02 13:20:13 -08001585 sb->s_maxbytes = MAX_LFS_FILESIZE;
Al Viro8de52772012-02-06 12:45:27 -05001586 sb->s_max_links = XFS_MAXLINK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 sb->s_time_gran = 1;
Dave Chinner38c26bf2021-08-18 18:46:37 -07001588 if (xfs_has_bigtime(mp)) {
Darrick J. Wongf93e54362020-08-17 09:59:07 -07001589 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1590 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1591 } else {
1592 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1593 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1594 }
Darrick J. Wong06dbf822020-08-24 11:58:01 -07001595 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
Christoph Hellwigadfb5fb2019-06-28 19:30:22 -07001596 sb->s_iflags |= SB_I_CGROUPWB;
1597
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 set_posix_acl_flag(sb);
1599
Dave Chinnerdc037ad2013-06-27 16:04:59 +10001600 /* version 5 superblocks support inode version counters. */
Dave Chinnerd6837c12021-08-18 18:46:56 -07001601 if (xfs_has_crc(mp))
Matthew Garrett357fdad2017-10-18 13:56:26 -07001602 sb->s_flags |= SB_I_VERSION;
Dave Chinnerdc037ad2013-06-27 16:04:59 +10001603
Dave Chinner0560f312021-08-18 18:46:52 -07001604 if (xfs_has_dax_always(mp)) {
Christoph Hellwig679a9942021-11-29 11:21:41 +01001605 error = xfs_setup_dax_always(mp);
1606 if (error)
Darrick J. Wongb6e03c12018-01-31 14:21:56 -08001607 goto out_filestream_unmount;
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001608 }
1609
Dave Chinner0560f312021-08-18 18:46:52 -07001610 if (xfs_has_discard(mp)) {
Kenjiro Nakayama1e6fa682017-09-18 12:03:56 -07001611 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1612
1613 if (!blk_queue_discard(q)) {
1614 xfs_warn(mp, "mounting with \"discard\" option, but "
1615 "the device does not support discard");
Dave Chinner0560f312021-08-18 18:46:52 -07001616 mp->m_features &= ~XFS_FEAT_DISCARD;
Kenjiro Nakayama1e6fa682017-09-18 12:03:56 -07001617 }
1618 }
1619
Dave Chinner38c26bf2021-08-18 18:46:37 -07001620 if (xfs_has_reflink(mp)) {
Christoph Hellwig66ae56a2019-02-18 09:38:49 -08001621 if (mp->m_sb.sb_rblocks) {
1622 xfs_alert(mp,
Darrick J. Wongc14632d2018-01-31 16:38:18 -08001623 "reflink not compatible with realtime device!");
Christoph Hellwig66ae56a2019-02-18 09:38:49 -08001624 error = -EINVAL;
1625 goto out_filestream_unmount;
1626 }
1627
1628 if (xfs_globals.always_cow) {
1629 xfs_info(mp, "using DEBUG-only always_cow mode.");
1630 mp->m_always_cow = true;
1631 }
Darrick J. Wongc14632d2018-01-31 16:38:18 -08001632 }
1633
Dave Chinner38c26bf2021-08-18 18:46:37 -07001634 if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
Darrick J. Wong1c0607a2016-08-03 12:20:57 +10001635 xfs_alert(mp,
Darrick J. Wong76883f72018-01-31 09:47:25 -08001636 "reverse mapping btree not compatible with realtime device!");
1637 error = -EINVAL;
1638 goto out_filestream_unmount;
Darrick J. Wong738f57c2016-08-26 15:59:19 +10001639 }
Darrick J. Wong1c0607a2016-08-03 12:20:57 +10001640
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001641 error = xfs_mountfs(mp);
Christoph Hellwig2bcf6e92011-07-13 13:43:48 +02001642 if (error)
Dave Chinner7e185302012-10-08 21:56:00 +11001643 goto out_filestream_unmount;
Dave Chinner704b2902011-03-26 09:14:57 +11001644
David Chinner01651642008-08-13 15:45:15 +10001645 root = igrab(VFS_I(mp->m_rootip));
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001646 if (!root) {
Dave Chinner24513372014-06-25 14:58:08 +10001647 error = -ENOENT;
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001648 goto out_unmount;
Christoph Hellwigcbc89dc2008-02-05 12:14:01 +11001649 }
Al Viro48fde702012-01-08 22:15:13 -05001650 sb->s_root = d_make_root(root);
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001651 if (!sb->s_root) {
Dave Chinner24513372014-06-25 14:58:08 +10001652 error = -ENOMEM;
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001653 goto out_unmount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 }
Christoph Hellwig74394492007-08-30 17:21:22 +10001655
Dave Chinner7e185302012-10-08 21:56:00 +11001656 return 0;
1657
1658 out_filestream_unmount:
Christoph Hellwig120226c2008-05-20 15:11:11 +10001659 xfs_filestream_unmount(mp);
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001660 out_free_sb:
1661 xfs_freesb(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001662 out_free_stats:
1663 free_percpu(mp->m_stats.xs_stats);
Dave Chinnerab23a772021-08-06 11:05:39 -07001664 out_destroy_inodegc:
Dave Chinner0ed17f02021-08-06 11:05:38 -07001665 xfs_mount_list_del(mp);
Dave Chinnerab23a772021-08-06 11:05:39 -07001666 xfs_inodegc_free_percpu(mp);
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001667 out_destroy_counters:
Dave Chinner5681ca42015-02-23 21:22:31 +11001668 xfs_destroy_percpu_counters(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001669 out_destroy_workqueues:
Christoph Hellwigaa6bf012012-02-29 09:53:48 +00001670 xfs_destroy_mount_workqueues(mp);
Christoph Hellwig61ba35d2010-09-30 02:25:54 +00001671 out_close_devices:
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001672 xfs_close_devices(mp);
Ian Kente1d3d212019-11-04 13:58:40 -08001673 out_free_names:
Dave Chinnerc9fbd7b2018-05-10 21:50:23 -07001674 sb->s_fs_info = NULL;
Ian Kenta943f372019-11-04 13:58:42 -08001675 xfs_mount_free(mp);
Dave Chinner24513372014-06-25 14:58:08 +10001676 return error;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001677
Christoph Hellwig2bcf6e92011-07-13 13:43:48 +02001678 out_unmount:
Christoph Hellwige48ad3162008-05-20 11:30:52 +10001679 xfs_filestream_unmount(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001680 xfs_unmountfs(mp);
Christoph Hellwig62033002008-08-13 16:50:21 +10001681 goto out_free_sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682}
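
/*
 * Editorial note on the unwind order above: failures before xfs_mountfs()
 * walk the out_free_names..out_free_sb chain, releasing only what has
 * been set up so far, while failures after a successful xfs_mountfs()
 * jump to out_unmount, tear down the filestreams and the mount itself,
 * and then rejoin the common chain at out_free_sb.
 */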
1683
Ian Kent73e5fff2019-11-04 13:58:46 -08001684static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001685xfs_fs_get_tree(
Ian Kent73e5fff2019-11-04 13:58:46 -08001686 struct fs_context *fc)
1687{
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001688 return get_tree_bdev(fc, xfs_fs_fill_super);
Ian Kent73e5fff2019-11-04 13:58:46 -08001689}
1690
Ian Kent63cd1e92019-11-04 13:58:47 -08001691static int
1692xfs_remount_rw(
1693 struct xfs_mount *mp)
1694{
1695 struct xfs_sb *sbp = &mp->m_sb;
1696 int error;
1697
Dave Chinner0560f312021-08-18 18:46:52 -07001698 if (xfs_has_norecovery(mp)) {
Ian Kent63cd1e92019-11-04 13:58:47 -08001699 xfs_warn(mp,
1700 "ro->rw transition prohibited on norecovery mount");
1701 return -EINVAL;
1702 }
1703
Dave Chinnerd6837c12021-08-18 18:46:56 -07001704 if (xfs_sb_is_v5(sbp) &&
Ian Kent63cd1e92019-11-04 13:58:47 -08001705 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1706 xfs_warn(mp,
1707 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1708 (sbp->sb_features_ro_compat &
1709 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1710 return -EINVAL;
1711 }
1712
Dave Chinner2e973b22021-08-18 18:46:52 -07001713 clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
Ian Kent63cd1e92019-11-04 13:58:47 -08001714
1715 /*
1716	 * If this is the first remount to writable state, we might have some
1717 * superblock changes to update.
1718 */
1719 if (mp->m_update_sb) {
1720 error = xfs_sync_sb(mp, false);
1721 if (error) {
1722 xfs_warn(mp, "failed to write sb changes");
1723 return error;
1724 }
1725 mp->m_update_sb = false;
1726 }
1727
1728 /*
1729 * Fill out the reserve pool if it is empty. Use the stashed value if
1730 * it is non-zero, otherwise go with the default.
1731 */
1732 xfs_restore_resvblks(mp);
1733 xfs_log_work_queue(mp);
Darrick J. Wongc9a65262021-01-22 16:48:44 -08001734 xfs_blockgc_start(mp);
Ian Kent63cd1e92019-11-04 13:58:47 -08001735
1736	/* Create the per-AG metadata reservation pool. */
1737 error = xfs_fs_reserve_ag_blocks(mp);
1738 if (error && error != -ENOSPC)
1739 return error;
1740
Dave Chinnerab23a772021-08-06 11:05:39 -07001741 /* Re-enable the background inode inactivation worker. */
1742 xfs_inodegc_start(mp);
1743
Ian Kent63cd1e92019-11-04 13:58:47 -08001744 return 0;
1745}
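
/*
 * Editorial note: this ro -> rw path restarts the background workers
 * (blockgc, inodegc) and refills the reserve pool that xfs_remount_ro()
 * stops and stashes, so repeated ro/rw remount cycles stay symmetric.
 */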
1746
1747static int
1748xfs_remount_ro(
1749 struct xfs_mount *mp)
1750{
Darrick J. Wong089558b2021-12-06 15:38:20 -08001751 struct xfs_icwalk icw = {
1752 .icw_flags = XFS_ICWALK_FLAG_SYNC,
1753 };
1754 int error;
Ian Kent63cd1e92019-11-04 13:58:47 -08001755
1756 /*
1757 * Cancel background eofb scanning so it cannot race with the final
1758 * log force+buftarg wait and deadlock the remount.
1759 */
Darrick J. Wongc9a65262021-01-22 16:48:44 -08001760 xfs_blockgc_stop(mp);
Ian Kent63cd1e92019-11-04 13:58:47 -08001761
Darrick J. Wong089558b2021-12-06 15:38:20 -08001762 /*
1763 * Clear out all remaining COW staging extents and speculative post-EOF
1764 * preallocations so that we don't leave inodes requiring inactivation
1765 * cleanups during reclaim on a read-only mount. We must process every
1766 * cached inode, so this requires a synchronous cache scan.
1767 */
1768 error = xfs_blockgc_free_space(mp, &icw);
Ian Kent63cd1e92019-11-04 13:58:47 -08001769 if (error) {
1770 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1771 return error;
1772 }
1773
Dave Chinnerab23a772021-08-06 11:05:39 -07001774 /*
1775 * Stop the inodegc background worker. xfs_fs_reconfigure already
1776 * flushed all pending inodegc work when it sync'd the filesystem.
1777 * The VFS holds s_umount, so we know that inodes cannot enter
1778 * xfs_fs_destroy_inode during a remount operation. In readonly mode
1779 * we send inodes straight to reclaim, so no inodes will be queued.
1780 */
1781 xfs_inodegc_stop(mp);
1782
Ian Kent63cd1e92019-11-04 13:58:47 -08001783 /* Free the per-AG metadata reservation pool. */
1784 error = xfs_fs_unreserve_ag_blocks(mp);
1785 if (error) {
1786 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1787 return error;
1788 }
1789
1790 /*
1791 * Before we sync the metadata, we need to free up the reserve block
1792 * pool so that the used block count in the superblock on disk is
1793	 * correct at the end of the remount. Stash the current reserve pool
1794 * size so that if we get remounted rw, we can return it to the same
1795 * size.
1796 */
1797 xfs_save_resvblks(mp);
1798
Brian Fosterea2064d2021-01-22 16:48:24 -08001799 xfs_log_clean(mp);
Dave Chinner2e973b22021-08-18 18:46:52 -07001800 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
Ian Kent63cd1e92019-11-04 13:58:47 -08001801
1802 return 0;
1803}
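
/*
 * Editorial summary of the rw -> ro sequence above: stop blockgc, flush
 * all COW staging and post-EOF blocks synchronously, stop inodegc, drop
 * the per-AG reservations, stash the reserve pool, write a clean log,
 * and only then mark the mount read-only.
 */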
1804
1805/*
1806 * Logically we would return an error here to prevent users from believing
1807 * they have changed mount options via remount that cannot actually be changed.
1808 *
1809 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1810 * arguments in some cases so we can't blindly reject options, but have to
1811 * check for each specified option if it actually differs from the currently
1812 * set option and only reject it if that's the case.
1813 *
1814 * Until that is implemented we return success for every remount request, and
1815 * silently ignore all options that we can't actually change.
1816 */
1817static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001818xfs_fs_reconfigure(
Ian Kent63cd1e92019-11-04 13:58:47 -08001819 struct fs_context *fc)
1820{
1821 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1822 struct xfs_mount *new_mp = fc->s_fs_info;
Ian Kent63cd1e92019-11-04 13:58:47 -08001823 int flags = fc->sb_flags;
1824 int error;
1825
Eric Sandeen4750a172020-07-15 08:30:37 -07001826 /* version 5 superblocks always support version counters. */
Dave Chinnerd6837c12021-08-18 18:46:56 -07001827 if (xfs_has_crc(mp))
Eric Sandeen4750a172020-07-15 08:30:37 -07001828 fc->sb_flags |= SB_I_VERSION;
1829
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001830 error = xfs_fs_validate_params(new_mp);
Ian Kent63cd1e92019-11-04 13:58:47 -08001831 if (error)
1832 return error;
1833
1834 sync_filesystem(mp->m_super);
1835
1836 /* inode32 -> inode64 */
Dave Chinner0560f312021-08-18 18:46:52 -07001837 if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1838 mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
Dave Chinnerd6837c12021-08-18 18:46:56 -07001839 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
Ian Kent63cd1e92019-11-04 13:58:47 -08001840 }
1841
1842 /* inode64 -> inode32 */
Dave Chinner0560f312021-08-18 18:46:52 -07001843 if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1844 mp->m_features |= XFS_FEAT_SMALL_INUMS;
Dave Chinnerd6837c12021-08-18 18:46:56 -07001845 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
Ian Kent63cd1e92019-11-04 13:58:47 -08001846 }
1847
1848 /* ro -> rw */
Dave Chinner2e973b22021-08-18 18:46:52 -07001849 if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
Ian Kent63cd1e92019-11-04 13:58:47 -08001850 error = xfs_remount_rw(mp);
1851 if (error)
1852 return error;
1853 }
1854
1855 /* rw -> ro */
Dave Chinner2e973b22021-08-18 18:46:52 -07001856 if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
Ian Kent63cd1e92019-11-04 13:58:47 -08001857 error = xfs_remount_ro(mp);
1858 if (error)
1859 return error;
1860 }
1861
1862 return 0;
1863}
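
/*
 * Worked example (editorial sketch): "mount -o remount,ro /mnt" arrives
 * here with SB_RDONLY set in fc->sb_flags; after parameter validation
 * and sync_filesystem(), only the rw -> ro branch runs. Options that are
 * restated unchanged are silently accepted, per the comment above.
 */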
1864
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001865static void xfs_fs_free(
Ian Kent73e5fff2019-11-04 13:58:46 -08001866 struct fs_context *fc)
1867{
1868 struct xfs_mount *mp = fc->s_fs_info;
1869
1870 /*
1871 * mp is stored in the fs_context when it is initialized.
1872 * mp is transferred to the superblock on a successful mount,
1873 * but if an error occurs before the transfer we have to free
1874 * it here.
1875 */
1876 if (mp)
1877 xfs_mount_free(mp);
1878}
1879
1880static const struct fs_context_operations xfs_context_ops = {
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001881 .parse_param = xfs_fs_parse_param,
1882 .get_tree = xfs_fs_get_tree,
1883 .reconfigure = xfs_fs_reconfigure,
1884 .free = xfs_fs_free,
Ian Kent73e5fff2019-11-04 13:58:46 -08001885};
1886
1887static int xfs_init_fs_context(
1888 struct fs_context *fc)
1889{
1890 struct xfs_mount *mp;
1891
Ian Kent50f83002019-11-04 13:58:48 -08001892 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
Ian Kent73e5fff2019-11-04 13:58:46 -08001893 if (!mp)
1894 return -ENOMEM;
1895
Ian Kent50f83002019-11-04 13:58:48 -08001896 spin_lock_init(&mp->m_sb_lock);
1897 spin_lock_init(&mp->m_agirotor_lock);
1898 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1899 spin_lock_init(&mp->m_perag_lock);
1900 mutex_init(&mp->m_growlock);
Darrick J. Wongf0f7a672020-04-12 13:11:10 -07001901 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
Ian Kent50f83002019-11-04 13:58:48 -08001902 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
Ian Kent50f83002019-11-04 13:58:48 -08001903 mp->m_kobj.kobject.kset = xfs_kset;
1904 /*
1905 * We don't create the finobt per-ag space reservation until after log
1906 * recovery, so we must set this to true so that an ifree transaction
1907 * started during log recovery will not depend on space reservations
1908 * for finobt expansion.
1909 */
1910 mp->m_finobt_nores = true;
1911
Ian Kent73e5fff2019-11-04 13:58:46 -08001912 /*
1913 * These can be overridden by the mount option parsing.
1914 */
1915 mp->m_logbufs = -1;
1916 mp->m_logbsize = -1;
1917 mp->m_allocsize_log = 16; /* 64k */
1918
1919 /*
1920 * Copy binary VFS mount flags we are interested in.
1921 */
1922 if (fc->sb_flags & SB_RDONLY)
Dave Chinner2e973b22021-08-18 18:46:52 -07001923 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
Ian Kent73e5fff2019-11-04 13:58:46 -08001924 if (fc->sb_flags & SB_DIRSYNC)
Dave Chinner0560f312021-08-18 18:46:52 -07001925 mp->m_features |= XFS_FEAT_DIRSYNC;
Ian Kent73e5fff2019-11-04 13:58:46 -08001926 if (fc->sb_flags & SB_SYNCHRONOUS)
Dave Chinner0560f312021-08-18 18:46:52 -07001927 mp->m_features |= XFS_FEAT_WSYNC;
Ian Kent73e5fff2019-11-04 13:58:46 -08001928
1929 fc->s_fs_info = mp;
1930 fc->ops = &xfs_context_ops;
1931
1932 return 0;
1933}
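
/*
 * Userspace view (editorial sketch using the new mount API syscalls;
 * error handling omitted, device and mount point are illustrative):
 * every fsconfig() string below reaches xfs_fs_parse_param() through the
 * context initialised here.
 *
 *	int fd = fsopen("xfs", FSOPEN_CLOEXEC);
 *
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fd, FSCONFIG_SET_STRING, "logbsize", "256k", 0);
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);  -> ->get_tree()
 *	int mfd = fsmount(fd, FSMOUNT_CLOEXEC, 0);
 *
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */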
1934
Andrew Morton5085b602007-02-20 13:57:47 -08001935static struct file_system_type xfs_fs_type = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936 .owner = THIS_MODULE,
1937 .name = "xfs",
Ian Kent73e5fff2019-11-04 13:58:46 -08001938 .init_fs_context = xfs_init_fs_context,
Al Virod7167b12019-09-07 07:23:15 -04001939 .parameters = xfs_fs_parameters,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 .kill_sb = kill_block_super,
Christoph Hellwigf736d932021-01-21 14:19:58 +01001941 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08001943MODULE_ALIAS_FS("xfs");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001945STATIC int __init
Darrick J. Wong182696f2021-10-12 11:09:23 -07001946xfs_init_caches(void)
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001947{
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07001948 int error;
1949
Darrick J. Wong182696f2021-10-12 11:09:23 -07001950 xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001951 sizeof(struct xlog_ticket),
1952 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001953 if (!xfs_log_ticket_cache)
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001954 goto out;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001955
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07001956 error = xfs_btree_init_cur_caches();
1957 if (error)
Darrick J. Wongc201d9c2021-10-12 14:17:01 -07001958 goto out_destroy_log_ticket_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001959
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07001960 error = xfs_defer_init_item_caches();
1961 if (error)
1962 goto out_destroy_btree_cur_cache;
1963
Darrick J. Wong182696f2021-10-12 11:09:23 -07001964 xfs_da_state_cache = kmem_cache_create("xfs_da_state",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001965 sizeof(struct xfs_da_state),
1966 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001967 if (!xfs_da_state_cache)
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07001968 goto out_destroy_defer_item_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001969
Darrick J. Wong182696f2021-10-12 11:09:23 -07001970 xfs_ifork_cache = kmem_cache_create("xfs_ifork",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001971 sizeof(struct xfs_ifork),
1972 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001973 if (!xfs_ifork_cache)
1974 goto out_destroy_da_state_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001975
Darrick J. Wong182696f2021-10-12 11:09:23 -07001976 xfs_trans_cache = kmem_cache_create("xfs_trans",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001977 sizeof(struct xfs_trans),
1978 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001979 if (!xfs_trans_cache)
1980 goto out_destroy_ifork_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001981
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001983 /*
Darrick J. Wong182696f2021-10-12 11:09:23 -07001984 * The size of the cache-allocated buf log item is the maximum
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001985 * size possible under XFS. This wastes a little bit of memory,
1986 * but it is much faster.
1987 */
Darrick J. Wong182696f2021-10-12 11:09:23 -07001988 xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001989 sizeof(struct xfs_buf_log_item),
1990 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001991 if (!xfs_buf_item_cache)
1992 goto out_destroy_trans_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001993
Darrick J. Wong182696f2021-10-12 11:09:23 -07001994 xfs_efd_cache = kmem_cache_create("xfs_efd_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001995 (sizeof(struct xfs_efd_log_item) +
1996 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
1997 sizeof(struct xfs_extent)),
1998 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001999 if (!xfs_efd_cache)
2000 goto out_destroy_buf_item_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002001
Darrick J. Wong182696f2021-10-12 11:09:23 -07002002 xfs_efi_cache = kmem_cache_create("xfs_efi_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002003 (sizeof(struct xfs_efi_log_item) +
2004 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
2005 sizeof(struct xfs_extent)),
2006 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002007 if (!xfs_efi_cache)
2008 goto out_destroy_efd_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002009
Darrick J. Wong182696f2021-10-12 11:09:23 -07002010 xfs_inode_cache = kmem_cache_create("xfs_inode",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002011 sizeof(struct xfs_inode), 0,
2012 (SLAB_HWCACHE_ALIGN |
2013 SLAB_RECLAIM_ACCOUNT |
2014 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2015 xfs_fs_inode_init_once);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002016 if (!xfs_inode_cache)
2017 goto out_destroy_efi_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002018
Darrick J. Wong182696f2021-10-12 11:09:23 -07002019 xfs_ili_cache = kmem_cache_create("xfs_ili",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002020 sizeof(struct xfs_inode_log_item), 0,
Dave Chinnerd59eada2020-03-24 20:10:28 -07002021 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2022 NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002023 if (!xfs_ili_cache)
2024 goto out_destroy_inode_cache;
Carlos Maiolinob1231762019-11-14 12:43:03 -08002025
Darrick J. Wong182696f2021-10-12 11:09:23 -07002026 xfs_icreate_cache = kmem_cache_create("xfs_icr",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002027 sizeof(struct xfs_icreate_item),
2028 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002029 if (!xfs_icreate_cache)
2030 goto out_destroy_ili_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002031
Darrick J. Wong182696f2021-10-12 11:09:23 -07002032 xfs_rud_cache = kmem_cache_create("xfs_rud_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002033 sizeof(struct xfs_rud_log_item),
2034 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002035 if (!xfs_rud_cache)
2036 goto out_destroy_icreate_cache;
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10002037
Darrick J. Wong182696f2021-10-12 11:09:23 -07002038 xfs_rui_cache = kmem_cache_create("xfs_rui_item",
Darrick J. Wongcd001582016-09-19 10:24:27 +10002039 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08002040 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002041 if (!xfs_rui_cache)
2042 goto out_destroy_rud_cache;
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10002043
Darrick J. Wong182696f2021-10-12 11:09:23 -07002044 xfs_cud_cache = kmem_cache_create("xfs_cud_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002045 sizeof(struct xfs_cud_log_item),
2046 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002047 if (!xfs_cud_cache)
2048 goto out_destroy_rui_cache;
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07002049
Darrick J. Wong182696f2021-10-12 11:09:23 -07002050 xfs_cui_cache = kmem_cache_create("xfs_cui_item",
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07002051 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08002052 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002053 if (!xfs_cui_cache)
2054 goto out_destroy_cud_cache;
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07002055
Darrick J. Wong182696f2021-10-12 11:09:23 -07002056 xfs_bud_cache = kmem_cache_create("xfs_bud_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002057 sizeof(struct xfs_bud_log_item),
2058 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002059 if (!xfs_bud_cache)
2060 goto out_destroy_cui_cache;
Darrick J. Wong6413a012016-10-03 09:11:25 -07002061
Darrick J. Wong182696f2021-10-12 11:09:23 -07002062 xfs_bui_cache = kmem_cache_create("xfs_bui_item",
Darrick J. Wong6413a012016-10-03 09:11:25 -07002063 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08002064 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002065 if (!xfs_bui_cache)
2066 goto out_destroy_bud_cache;
Darrick J. Wong6413a012016-10-03 09:11:25 -07002067
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002068 return 0;
2069
Darrick J. Wong182696f2021-10-12 11:09:23 -07002070 out_destroy_bud_cache:
2071 kmem_cache_destroy(xfs_bud_cache);
2072 out_destroy_cui_cache:
2073 kmem_cache_destroy(xfs_cui_cache);
2074 out_destroy_cud_cache:
2075 kmem_cache_destroy(xfs_cud_cache);
2076 out_destroy_rui_cache:
2077 kmem_cache_destroy(xfs_rui_cache);
2078 out_destroy_rud_cache:
2079 kmem_cache_destroy(xfs_rud_cache);
2080 out_destroy_icreate_cache:
2081 kmem_cache_destroy(xfs_icreate_cache);
2082 out_destroy_ili_cache:
2083 kmem_cache_destroy(xfs_ili_cache);
2084 out_destroy_inode_cache:
2085 kmem_cache_destroy(xfs_inode_cache);
2086 out_destroy_efi_cache:
2087 kmem_cache_destroy(xfs_efi_cache);
2088 out_destroy_efd_cache:
2089 kmem_cache_destroy(xfs_efd_cache);
2090 out_destroy_buf_item_cache:
2091 kmem_cache_destroy(xfs_buf_item_cache);
2092 out_destroy_trans_cache:
2093 kmem_cache_destroy(xfs_trans_cache);
2094 out_destroy_ifork_cache:
2095 kmem_cache_destroy(xfs_ifork_cache);
2096 out_destroy_da_state_cache:
2097 kmem_cache_destroy(xfs_da_state_cache);
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07002098 out_destroy_defer_item_cache:
2099 xfs_defer_destroy_item_caches();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002100 out_destroy_btree_cur_cache:
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07002101 xfs_btree_destroy_cur_caches();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002102 out_destroy_log_ticket_cache:
2103 kmem_cache_destroy(xfs_log_ticket_cache);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002104 out:
2105 return -ENOMEM;
2106}
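
/*
 * Editorial sketch of the unwind idiom above, with hypothetical caches:
 * each allocation gets a label that destroys everything created before
 * it, so a failure at any step leaves no cache behind.
 *
 *	a = kmem_cache_create("a", sizeof(struct a_item), 0, 0, NULL);
 *	if (!a)
 *		goto out;
 *	b = kmem_cache_create("b", sizeof(struct b_item), 0, 0, NULL);
 *	if (!b)
 *		goto out_destroy_a;
 *	return 0;
 * out_destroy_a:
 *	kmem_cache_destroy(a);
 * out:
 *	return -ENOMEM;
 */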
2107
2108STATIC void
Darrick J. Wong182696f2021-10-12 11:09:23 -07002109xfs_destroy_caches(void)
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002110{
Kirill A. Shutemov8c0a8532012-09-26 11:33:07 +10002111 /*
2112	 * Make sure all delayed rcu frees are flushed before we
2113 * destroy caches.
2114 */
2115 rcu_barrier();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002116 kmem_cache_destroy(xfs_bui_cache);
2117 kmem_cache_destroy(xfs_bud_cache);
2118 kmem_cache_destroy(xfs_cui_cache);
2119 kmem_cache_destroy(xfs_cud_cache);
2120 kmem_cache_destroy(xfs_rui_cache);
2121 kmem_cache_destroy(xfs_rud_cache);
2122 kmem_cache_destroy(xfs_icreate_cache);
2123 kmem_cache_destroy(xfs_ili_cache);
2124 kmem_cache_destroy(xfs_inode_cache);
2125 kmem_cache_destroy(xfs_efi_cache);
2126 kmem_cache_destroy(xfs_efd_cache);
2127 kmem_cache_destroy(xfs_buf_item_cache);
2128 kmem_cache_destroy(xfs_trans_cache);
2129 kmem_cache_destroy(xfs_ifork_cache);
2130 kmem_cache_destroy(xfs_da_state_cache);
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07002131 xfs_defer_destroy_item_caches();
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07002132 xfs_btree_destroy_cur_caches();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002133 kmem_cache_destroy(xfs_log_ticket_cache);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002134}
2135
2136STATIC int __init
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002137xfs_init_workqueues(void)
2138{
2139 /*
Dave Chinnerc999a222012-03-22 05:15:07 +00002140 * The allocation workqueue can be used in memory reclaim situations
2141 * (writepage path), and parallelism is only limited by the number of
2142 * AGs in all the filesystems mounted. Hence use the default large
2143 * max_active value for this workqueue.
2144 */
Brian Foster8018ec02014-09-09 11:44:46 +10002145 xfs_alloc_wq = alloc_workqueue("xfsalloc",
Darrick J. Wong05a302a2021-01-22 16:48:42 -08002146 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
Dave Chinnerc999a222012-03-22 05:15:07 +00002147 if (!xfs_alloc_wq)
Dave Chinner58896082012-10-08 21:56:05 +11002148 return -ENOMEM;
Dave Chinnerc999a222012-03-22 05:15:07 +00002149
Darrick J. Wong05a302a2021-01-22 16:48:42 -08002150 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2151 0);
Christoph Hellwig4560e782017-02-07 14:07:58 -08002152 if (!xfs_discard_wq)
2153 goto out_free_alloc_wq;
2154
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002155 return 0;
Christoph Hellwig4560e782017-02-07 14:07:58 -08002156out_free_alloc_wq:
2157 destroy_workqueue(xfs_alloc_wq);
2158 return -ENOMEM;
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002159}
2160
Luck, Tony39411f82011-04-11 12:06:12 -07002161STATIC void
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002162xfs_destroy_workqueues(void)
2163{
Christoph Hellwig4560e782017-02-07 14:07:58 -08002164 destroy_workqueue(xfs_discard_wq);
Dave Chinnerc999a222012-03-22 05:15:07 +00002165 destroy_workqueue(xfs_alloc_wq);
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002166}
2167
Dave Chinnerf1653c22021-08-06 11:05:37 -07002168#ifdef CONFIG_HOTPLUG_CPU
2169static int
2170xfs_cpu_dead(
2171 unsigned int cpu)
2172{
Dave Chinner0ed17f02021-08-06 11:05:38 -07002173 struct xfs_mount *mp, *n;
2174
2175 spin_lock(&xfs_mount_list_lock);
2176 list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2177 spin_unlock(&xfs_mount_list_lock);
Dave Chinnerab23a772021-08-06 11:05:39 -07002178 xfs_inodegc_cpu_dead(mp, cpu);
Dave Chinner0ed17f02021-08-06 11:05:38 -07002179 spin_lock(&xfs_mount_list_lock);
2180 }
2181 spin_unlock(&xfs_mount_list_lock);
Dave Chinnerf1653c22021-08-06 11:05:37 -07002182 return 0;
2183}
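
/*
 * Editorial note: the list lock is dropped around xfs_inodegc_cpu_dead()
 * above, presumably because the per-mount handler is not safe to call
 * under a spinlock; the _safe iterator caches the next element before
 * the lock is released.
 */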
2184
2185static int __init
2186xfs_cpu_hotplug_init(void)
2187{
2188 int error;
2189
2190 error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2191 xfs_cpu_dead);
2192 if (error < 0)
2193 xfs_alert(NULL,
2194"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2195 error);
2196 return error;
2197}
2198
2199static void
2200xfs_cpu_hotplug_destroy(void)
2201{
2202 cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2203}
2204
2205#else /* !CONFIG_HOTPLUG_CPU */
2206static inline int xfs_cpu_hotplug_init(void) { return 0; }
2207static inline void xfs_cpu_hotplug_destroy(void) {}
2208#endif
2209
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002210STATIC int __init
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002211init_xfs_fs(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212{
2213 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
Darrick J. Wong30cbc592016-03-09 08:15:14 +11002215 xfs_check_ondisk_structs();
2216
Christoph Hellwig65795912008-11-28 14:23:33 +11002217 printk(KERN_INFO XFS_VERSION_STRING " with "
2218 XFS_BUILD_OPTIONS " enabled\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002220 xfs_dir_startup();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
Dave Chinnerf1653c22021-08-06 11:05:37 -07002222 error = xfs_cpu_hotplug_init();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002223 if (error)
2224 goto out;
2225
Darrick J. Wong182696f2021-10-12 11:09:23 -07002226 error = xfs_init_caches();
Dave Chinnerf1653c22021-08-06 11:05:37 -07002227 if (error)
2228 goto out_destroy_hp;
2229
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002230 error = xfs_init_workqueues();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002231 if (error)
Darrick J. Wong182696f2021-10-12 11:09:23 -07002232 goto out_destroy_caches;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002233
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002234 error = xfs_mru_cache_init();
2235 if (error)
2236 goto out_destroy_wq;
2237
Nathan Scottce8e9222006-01-11 15:39:08 +11002238 error = xfs_buf_init();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002239 if (error)
Christoph Hellwig1919add2014-04-23 07:11:51 +10002240 goto out_mru_cache_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002242 error = xfs_init_procfs();
2243 if (error)
2244 goto out_buf_terminate;
2245
2246 error = xfs_sysctl_register();
2247 if (error)
2248 goto out_cleanup_procfs;
2249
Brian Foster3d871222014-07-15 07:41:37 +10002250 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2251 if (!xfs_kset) {
2252 error = -ENOMEM;
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002253 goto out_sysctl_unregister;
Brian Foster3d871222014-07-15 07:41:37 +10002254 }
2255
Bill O'Donnell80529c42015-10-12 05:19:45 +11002256 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2257
2258 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2259 if (!xfsstats.xs_stats) {
2260 error = -ENOMEM;
2261 goto out_kset_unregister;
2262 }
2263
2264 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002265 "stats");
2266 if (error)
Bill O'Donnell80529c42015-10-12 05:19:45 +11002267 goto out_free_stats;
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002268
Brian Foster65b65732014-09-09 11:52:42 +10002269#ifdef DEBUG
2270 xfs_dbg_kobj.kobject.kset = xfs_kset;
2271 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002272 if (error)
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002273 goto out_remove_stats_kobj;
Brian Foster65b65732014-09-09 11:52:42 +10002274#endif
2275
2276 error = xfs_qm_init();
2277 if (error)
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002278 goto out_remove_dbg_kobj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
2280 error = register_filesystem(&xfs_fs_type);
2281 if (error)
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002282 goto out_qm_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 return 0;
2284
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002285 out_qm_exit:
2286 xfs_qm_exit();
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002287 out_remove_dbg_kobj:
Brian Foster65b65732014-09-09 11:52:42 +10002288#ifdef DEBUG
2289 xfs_sysfs_del(&xfs_dbg_kobj);
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002290 out_remove_stats_kobj:
Brian Foster65b65732014-09-09 11:52:42 +10002291#endif
Bill O'Donnell80529c42015-10-12 05:19:45 +11002292 xfs_sysfs_del(&xfsstats.xs_kobj);
2293 out_free_stats:
2294 free_percpu(xfsstats.xs_stats);
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002295 out_kset_unregister:
Brian Foster3d871222014-07-15 07:41:37 +10002296 kset_unregister(xfs_kset);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002297 out_sysctl_unregister:
2298 xfs_sysctl_unregister();
2299 out_cleanup_procfs:
2300 xfs_cleanup_procfs();
2301 out_buf_terminate:
Nathan Scottce8e9222006-01-11 15:39:08 +11002302 xfs_buf_terminate();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002303 out_mru_cache_uninit:
2304 xfs_mru_cache_uninit();
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002305 out_destroy_wq:
2306 xfs_destroy_workqueues();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002307 out_destroy_caches:
2308 xfs_destroy_caches();
Dave Chinnerf1653c22021-08-06 11:05:37 -07002309 out_destroy_hp:
2310 xfs_cpu_hotplug_destroy();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002311 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 return error;
2313}
2314
2315STATIC void __exit
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002316exit_xfs_fs(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317{
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002318 xfs_qm_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 unregister_filesystem(&xfs_fs_type);
Brian Foster65b65732014-09-09 11:52:42 +10002320#ifdef DEBUG
2321 xfs_sysfs_del(&xfs_dbg_kobj);
2322#endif
Bill O'Donnell80529c42015-10-12 05:19:45 +11002323 xfs_sysfs_del(&xfsstats.xs_kobj);
2324 free_percpu(xfsstats.xs_stats);
Brian Foster3d871222014-07-15 07:41:37 +10002325 kset_unregister(xfs_kset);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002326 xfs_sysctl_unregister();
2327 xfs_cleanup_procfs();
Nathan Scottce8e9222006-01-11 15:39:08 +11002328 xfs_buf_terminate();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002329 xfs_mru_cache_uninit();
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002330 xfs_destroy_workqueues();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002331 xfs_destroy_caches();
Darrick J. Wongaf3b6382015-11-03 13:06:34 +11002332 xfs_uuid_table_free();
Dave Chinnerf1653c22021-08-06 11:05:37 -07002333 xfs_cpu_hotplug_destroy();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334}
2335
2336module_init(init_xfs_fs);
2337module_exit(exit_xfs_fs);
2338
2339MODULE_AUTHOR("Silicon Graphics, Inc.");
2340MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2341MODULE_LICENSE("GPL");