// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

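/*
 * A note on the list above: it is only maintained when CONFIG_HOTPLUG_CPU is
 * enabled, which suggests its consumer is a CPU hotplug callback (outside
 * this excerpt) that needs to walk every mounted XFS filesystem; without
 * hotplug support the add/del helpers compile away to nothing.
 */
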
enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}
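
/*
 * Note that the two DAX feature bits are kept mutually exclusive above:
 * selecting "always" clears "never" and vice versa, while "inode" clears
 * both so that behaviour is driven by the per-inode DAX flag instead.
 */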

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
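
/*
 * "dax" is deliberately listed twice above: the flag form matches a bare
 * "-o dax" (Opt_dax), while the enum form matches "-o dax=inode",
 * "-o dax=always" and "-o dax=never" through dax_param_enums.
 */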

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (xfs_is_inode32(mp)) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}
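
/*
 * Illustrative example: on a filesystem large enough that inode numbers in
 * the upper AGs would exceed XFS_MAXINUMBER_32, an inode32 mount leaves
 * pagi_inodeok clear for those AGs, so new inodes are allocated only from
 * the low AGs whose inode numbers still fit in 32 bits, and the first
 * max_metadata of those AGs remain preferred for metadata.
 */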

static bool
xfs_buftarg_is_dax(
	struct super_block	*sb,
	struct xfs_buftarg	*bt)
{
	return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
			bdev_nr_sectors(bt->bt_bdev));
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
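
/*
 * In short: the data and realtime buffer targets are always sized to the
 * superblock's data sector size, while an external log falls back to BBSIZE
 * unless the sector-size feature bit is set, in which case sb_logsectsize
 * is used.
 */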

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flag)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}
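
/*
 * The dirty_inode hook above only does work on lazytime mounts: when the
 * VFS promotes an inode from I_DIRTY_TIME to I_DIRTY_SYNC, the deferred
 * timestamp update is logged through a transaction so that it is not lost.
 */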

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/* Wait for whatever inactivations are in progress. */
	xfs_inodegc_flush(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
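
/*
 * The pair above brackets a freeze/thaw cycle: saving zeroes the block
 * reservation (returning the reserved pool to the free space counters)
 * while remembering the old count, and restoring reinstates either that
 * saved count or, if nothing was saved, the default reservation.
 */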

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
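
/*
 * Example: suffix_kstrtoint("64k", 10, &res) strips the 'k', parses "64" in
 * base 10, and stores 64 << 10 = 65536 in *res; a bare "65536" yields the
 * same value with no shift applied.
 */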

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/*
	 * Don't print the warning if reconfiguring and current mount point
	 * already had the flag set.
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}
1179
Ian Kent8757c382019-11-04 13:58:48 -08001180/*
1181 * Set mount state from a mount option.
1182 *
1183 * NOTE: mp->m_super is NULL here!
1184 */
1185static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001186xfs_fs_parse_param(
Ian Kent8757c382019-11-04 13:58:48 -08001187 struct fs_context *fc,
1188 struct fs_parameter *param)
1189{
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001190 struct xfs_mount *parsing_mp = fc->s_fs_info;
Ian Kent8757c382019-11-04 13:58:48 -08001191 struct fs_parse_result result;
1192 int size = 0;
1193 int opt;
1194
Al Virod7167b12019-09-07 07:23:15 -04001195 opt = fs_parse(fc, xfs_fs_parameters, param, &result);
Ian Kent8757c382019-11-04 13:58:48 -08001196 if (opt < 0)
1197 return opt;
1198
1199 switch (opt) {
1200 case Opt_logbufs:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001201 parsing_mp->m_logbufs = result.uint_32;
Ian Kent8757c382019-11-04 13:58:48 -08001202 return 0;
1203 case Opt_logbsize:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001204 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
Ian Kent8757c382019-11-04 13:58:48 -08001205 return -EINVAL;
1206 return 0;
1207 case Opt_logdev:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001208 kfree(parsing_mp->m_logname);
1209 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1210 if (!parsing_mp->m_logname)
Ian Kent8757c382019-11-04 13:58:48 -08001211 return -ENOMEM;
1212 return 0;
1213 case Opt_rtdev:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001214 kfree(parsing_mp->m_rtname);
1215 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1216 if (!parsing_mp->m_rtname)
Ian Kent8757c382019-11-04 13:58:48 -08001217 return -ENOMEM;
1218 return 0;
1219 case Opt_allocsize:
1220 if (suffix_kstrtoint(param->string, 10, &size))
1221 return -EINVAL;
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001222 parsing_mp->m_allocsize_log = ffs(size) - 1;
Dave Chinner0560f312021-08-18 18:46:52 -07001223 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
Ian Kent8757c382019-11-04 13:58:48 -08001224 return 0;
1225 case Opt_grpid:
1226 case Opt_bsdgroups:
Dave Chinner0560f312021-08-18 18:46:52 -07001227 parsing_mp->m_features |= XFS_FEAT_GRPID;
Ian Kent8757c382019-11-04 13:58:48 -08001228 return 0;
1229 case Opt_nogrpid:
1230 case Opt_sysvgroups:
Dave Chinner0560f312021-08-18 18:46:52 -07001231 parsing_mp->m_features &= ~XFS_FEAT_GRPID;
Ian Kent8757c382019-11-04 13:58:48 -08001232 return 0;
1233 case Opt_wsync:
Dave Chinner0560f312021-08-18 18:46:52 -07001234 parsing_mp->m_features |= XFS_FEAT_WSYNC;
Ian Kent8757c382019-11-04 13:58:48 -08001235 return 0;
1236 case Opt_norecovery:
Dave Chinner0560f312021-08-18 18:46:52 -07001237 parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
Ian Kent8757c382019-11-04 13:58:48 -08001238 return 0;
1239 case Opt_noalign:
Dave Chinner0560f312021-08-18 18:46:52 -07001240 parsing_mp->m_features |= XFS_FEAT_NOALIGN;
Ian Kent8757c382019-11-04 13:58:48 -08001241 return 0;
1242 case Opt_swalloc:
Dave Chinner0560f312021-08-18 18:46:52 -07001243 parsing_mp->m_features |= XFS_FEAT_SWALLOC;
Ian Kent8757c382019-11-04 13:58:48 -08001244 return 0;
1245 case Opt_sunit:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001246 parsing_mp->m_dalign = result.uint_32;
Ian Kent8757c382019-11-04 13:58:48 -08001247 return 0;
1248 case Opt_swidth:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001249 parsing_mp->m_swidth = result.uint_32;
Ian Kent8757c382019-11-04 13:58:48 -08001250 return 0;
1251 case Opt_inode32:
Dave Chinner0560f312021-08-18 18:46:52 -07001252 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
Ian Kent8757c382019-11-04 13:58:48 -08001253 return 0;
1254 case Opt_inode64:
Dave Chinner0560f312021-08-18 18:46:52 -07001255 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
Ian Kent8757c382019-11-04 13:58:48 -08001256 return 0;
1257 case Opt_nouuid:
Dave Chinner0560f312021-08-18 18:46:52 -07001258 parsing_mp->m_features |= XFS_FEAT_NOUUID;
Ian Kent8757c382019-11-04 13:58:48 -08001259 return 0;
Ian Kent8757c382019-11-04 13:58:48 -08001260 case Opt_largeio:
Dave Chinner0560f312021-08-18 18:46:52 -07001261 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
Ian Kent8757c382019-11-04 13:58:48 -08001262 return 0;
1263 case Opt_nolargeio:
Dave Chinner0560f312021-08-18 18:46:52 -07001264 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
Ian Kent8757c382019-11-04 13:58:48 -08001265 return 0;
Ian Kent8757c382019-11-04 13:58:48 -08001266 case Opt_filestreams:
Dave Chinner0560f312021-08-18 18:46:52 -07001267 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
Ian Kent8757c382019-11-04 13:58:48 -08001268 return 0;
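	/*
	 * The quota options below only set the in-core m_qflags; the quota
	 * subsystem acts on these flags later in the mount process.
	 */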
1269 case Opt_noquota:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001270 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1271 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
Ian Kent8757c382019-11-04 13:58:48 -08001272 return 0;
1273 case Opt_quota:
1274 case Opt_uquota:
1275 case Opt_usrquota:
Christoph Hellwig149e53a2021-08-06 11:05:37 -07001276 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
Ian Kent8757c382019-11-04 13:58:48 -08001277 return 0;
1278 case Opt_qnoenforce:
1279 case Opt_uqnoenforce:
Christoph Hellwig149e53a2021-08-06 11:05:37 -07001280 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001281 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
Ian Kent8757c382019-11-04 13:58:48 -08001282 return 0;
1283 case Opt_pquota:
1284 case Opt_prjquota:
Christoph Hellwig149e53a2021-08-06 11:05:37 -07001285 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
Ian Kent8757c382019-11-04 13:58:48 -08001286 return 0;
1287 case Opt_pqnoenforce:
Christoph Hellwig149e53a2021-08-06 11:05:37 -07001288 parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001289 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
Ian Kent8757c382019-11-04 13:58:48 -08001290 return 0;
1291 case Opt_gquota:
1292 case Opt_grpquota:
Christoph Hellwig149e53a2021-08-06 11:05:37 -07001293 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
Ian Kent8757c382019-11-04 13:58:48 -08001294 return 0;
1295 case Opt_gqnoenforce:
Christoph Hellwig149e53a2021-08-06 11:05:37 -07001296 parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001297 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
Ian Kent8757c382019-11-04 13:58:48 -08001298 return 0;
1299 case Opt_discard:
Dave Chinner0560f312021-08-18 18:46:52 -07001300 parsing_mp->m_features |= XFS_FEAT_DISCARD;
Ian Kent8757c382019-11-04 13:58:48 -08001301 return 0;
1302 case Opt_nodiscard:
Dave Chinner0560f312021-08-18 18:46:52 -07001303 parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
Ian Kent8757c382019-11-04 13:58:48 -08001304 return 0;
1305#ifdef CONFIG_FS_DAX
1306 case Opt_dax:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001307 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
Ira Weiny8d6c3442020-05-04 09:02:42 -07001308 return 0;
1309 case Opt_dax_enum:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001310 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
Ian Kent8757c382019-11-04 13:58:48 -08001311 return 0;
1312#endif
Pavel Reichlc23c3932020-09-25 11:10:29 -07001313 /* Following mount options will be removed in September 2025 */
1314 case Opt_ikeep:
Dave Chinner0560f312021-08-18 18:46:52 -07001315 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1316 parsing_mp->m_features |= XFS_FEAT_IKEEP;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001317 return 0;
1318 case Opt_noikeep:
Dave Chinner0560f312021-08-18 18:46:52 -07001319 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1320 parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001321 return 0;
1322 case Opt_attr2:
Dave Chinner0560f312021-08-18 18:46:52 -07001323 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1324 parsing_mp->m_features |= XFS_FEAT_ATTR2;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001325 return 0;
1326 case Opt_noattr2:
Dave Chinner0560f312021-08-18 18:46:52 -07001327 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1328 parsing_mp->m_features |= XFS_FEAT_NOATTR2;
Pavel Reichlc23c3932020-09-25 11:10:29 -07001329 return 0;
Ian Kent8757c382019-11-04 13:58:48 -08001330 default:
Pavel Reichl0f98b4e2021-03-22 09:52:01 -07001331 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
Ian Kent8757c382019-11-04 13:58:48 -08001332 return -EINVAL;
1333 }
1334
1335 return 0;
1336}
1337
1338static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001339xfs_fs_validate_params(
Ian Kent8757c382019-11-04 13:58:48 -08001340 struct xfs_mount *mp)
1341{
Dave Chinner0560f312021-08-18 18:46:52 -07001342 /* No recovery flag requires a read-only mount */
Dave Chinner2e973b22021-08-18 18:46:52 -07001343 if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
Ian Kent8757c382019-11-04 13:58:48 -08001344 xfs_warn(mp, "no-recovery mounts must be read-only.");
1345 return -EINVAL;
1346 }
1347
Dave Chinner0560f312021-08-18 18:46:52 -07001348 /*
1349 * We have not read the superblock at this point, so only the attr2
1350 * mount option can set the attr2 feature by this stage.
1351 */
1352 if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
Dave Chinnere23b55d2021-08-18 18:46:25 -07001353 xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1354 return -EINVAL;
1355 }
1356
Dave Chinner0560f312021-08-18 18:46:52 -07001358 if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
Ian Kent8757c382019-11-04 13:58:48 -08001359 xfs_warn(mp,
1360 "sunit and swidth options incompatible with the noalign option");
1361 return -EINVAL;
1362 }
1363
1364 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1365 xfs_warn(mp, "quota support not available in this kernel.");
1366 return -EINVAL;
1367 }
1368
1369 if ((mp->m_dalign && !mp->m_swidth) ||
1370 (!mp->m_dalign && mp->m_swidth)) {
1371 xfs_warn(mp, "sunit and swidth must be specified together");
1372 return -EINVAL;
1373 }
1374
1375 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1376 xfs_warn(mp,
1377 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1378 mp->m_swidth, mp->m_dalign);
1379 return -EINVAL;
1380 }
1381
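	/*
	 * Log buffer count/size values of -1 (and 0) are left for the log
	 * code to choose defaults; only explicitly requested values are
	 * range-checked here.
	 */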
1382 if (mp->m_logbufs != -1 &&
1383 mp->m_logbufs != 0 &&
1384 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1385 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1386 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1387 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1388 return -EINVAL;
1389 }
1390
1391 if (mp->m_logbsize != -1 &&
1392 mp->m_logbsize != 0 &&
1393 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1394 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1395 !is_power_of_2(mp->m_logbsize))) {
1396 xfs_warn(mp,
1397 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1398 mp->m_logbsize);
1399 return -EINVAL;
1400 }
1401
Dave Chinner0560f312021-08-18 18:46:52 -07001402 if (xfs_has_allocsize(mp) &&
Ian Kent8757c382019-11-04 13:58:48 -08001403 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1404 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1405 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1406 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1407 return -EINVAL;
1408 }
1409
1410 return 0;
1411}
1412
1413static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001414xfs_fs_fill_super(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 struct super_block *sb,
Ian Kent73e5fff2019-11-04 13:58:46 -08001416 struct fs_context *fc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417{
Ian Kent73e5fff2019-11-04 13:58:46 -08001418 struct xfs_mount *mp = sb->s_fs_info;
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001419 struct inode *root;
Colin Ian King0279c712019-11-06 08:07:46 -08001420 int flags = 0, error;
Christoph Hellwigbdd907b2008-05-20 15:10:44 +10001421
Ian Kent7c89fcb2019-11-04 13:58:46 -08001422 mp->m_super = sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001424 error = xfs_fs_validate_params(mp);
Christoph Hellwig745f6912007-08-30 17:20:39 +10001425 if (error)
Ian Kente1d3d212019-11-04 13:58:40 -08001426 goto out_free_names;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
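	/*
	 * Set up the VFS superblock operations up front; the minimum block
	 * size here is BBSIZE (512 bytes), the unit in which XFS addresses
	 * the on-disk superblock.
	 */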
1428 sb_min_blocksize(sb, BBSIZE);
Lachlan McIlroy0ec58512008-06-23 13:23:01 +10001429 sb->s_xattr = xfs_xattr_handlers;
Nathan Scotta50cd262006-03-14 14:06:18 +11001430 sb->s_export_op = &xfs_export_operations;
Christoph Hellwigfcafb712009-02-09 08:47:34 +01001431#ifdef CONFIG_XFS_QUOTA
Nathan Scotta50cd262006-03-14 14:06:18 +11001432 sb->s_qcop = &xfs_quotactl_operations;
Jan Kara17ef4fd2014-09-30 22:35:33 +02001433 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
Christoph Hellwigfcafb712009-02-09 08:47:34 +01001434#endif
Nathan Scotta50cd262006-03-14 14:06:18 +11001435 sb->s_op = &xfs_super_operations;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
Dave Chinnerdae5cd82018-05-10 21:50:23 -07001437 /*
1438 * Delay mount work if the debug hook is set. This is debug
1439 * instrumention to coordinate simulation of xfs mount failures with
 1440	 * instrumentation to coordinate simulation of xfs mount failures with
 1441	 * VFS superblock operations.
1442 if (xfs_globals.mount_delay) {
1443 xfs_notice(mp, "Delaying mount for %d seconds.",
1444 xfs_globals.mount_delay);
1445 msleep(xfs_globals.mount_delay * 1000);
1446 }
1447
Ian Kent73e5fff2019-11-04 13:58:46 -08001448 if (fc->sb_flags & SB_SILENT)
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001449 flags |= XFS_MFSI_QUIET;
1450
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001451 error = xfs_open_devices(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001452 if (error)
Ian Kente1d3d212019-11-04 13:58:40 -08001453 goto out_free_names;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001454
Dave Chinner24513372014-06-25 14:58:08 +10001455 error = xfs_init_mount_workqueues(mp);
Christoph Hellwig61ba35d2010-09-30 02:25:54 +00001456 if (error)
1457 goto out_close_devices;
Christoph Hellwigc962fb72008-05-20 15:10:52 +10001458
Dave Chinner5681ca42015-02-23 21:22:31 +11001459 error = xfs_init_percpu_counters(mp);
Christoph Hellwigaa6bf012012-02-29 09:53:48 +00001460 if (error)
1461 goto out_destroy_workqueues;
1462
Dave Chinnerab23a772021-08-06 11:05:39 -07001463 error = xfs_inodegc_init_percpu(mp);
1464 if (error)
1465 goto out_destroy_counters;
1466
Dave Chinner0ed17f02021-08-06 11:05:38 -07001467 /*
1468 * All percpu data structures requiring cleanup when a cpu goes offline
1469 * must be allocated before adding this @mp to the cpu-dead handler's
1470 * mount list.
1471 */
1472 xfs_mount_list_add(mp);
1473
Bill O'Donnell225e4632015-10-12 18:21:19 +11001474 /* Allocate stats memory before we do operations that might use it */
1475 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1476 if (!mp->m_stats.xs_stats) {
Dan Carpenterf9d460b2015-10-19 08:42:47 +11001477 error = -ENOMEM;
Dave Chinnerab23a772021-08-06 11:05:39 -07001478 goto out_destroy_inodegc;
Bill O'Donnell225e4632015-10-12 18:21:19 +11001479 }
1480
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001481 error = xfs_readsb(mp, flags);
1482 if (error)
Bill O'Donnell225e4632015-10-12 18:21:19 +11001483 goto out_free_stats;
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001484
1485 error = xfs_finish_flags(mp);
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001486 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001487 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001488
Christoph Hellwige34b5622008-05-20 15:10:36 +10001489 error = xfs_setup_devices(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001490 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001491 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001492
Darrick J. Wongb96cb832020-09-10 10:57:17 -07001493 /* V4 support is undergoing deprecation. */
Dave Chinner38c26bf2021-08-18 18:46:37 -07001494 if (!xfs_has_crc(mp)) {
Darrick J. Wongb96cb832020-09-10 10:57:17 -07001495#ifdef CONFIG_XFS_SUPPORT_V4
1496 xfs_warn_once(mp,
1497 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1498#else
1499 xfs_warn(mp,
1500 "Deprecated V4 format (crc=0) not supported by kernel.");
1501 error = -EINVAL;
1502 goto out_free_sb;
1503#endif
1504 }
1505
Darrick J. Wong80c720b2020-11-24 11:45:55 -08001506 /* Filesystem claims it needs repair, so refuse the mount. */
Dave Chinnerebd90272021-08-18 18:46:55 -07001507 if (xfs_has_needsrepair(mp)) {
Darrick J. Wong80c720b2020-11-24 11:45:55 -08001508 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1509 error = -EFSCORRUPTED;
1510 goto out_free_sb;
1511 }
1512
Darrick J. Wong932befe2020-01-02 13:20:13 -08001513 /*
Darrick J. Wong3945ae02020-11-24 11:45:54 -08001514 * Don't touch the filesystem if a user tool thinks it owns the primary
1515 * superblock. mkfs doesn't clear the flag from secondary supers, so
1516 * we don't check them at all.
1517 */
1518 if (mp->m_sb.sb_inprogress) {
1519 xfs_warn(mp, "Offline file system operation in progress!");
1520 error = -EFSCORRUPTED;
1521 goto out_free_sb;
1522 }
1523
1524 /*
 1525	 * Until this is fixed, only page-sized or smaller data blocks work.
1526 */
1527 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1528 xfs_warn(mp,
1529 "File system with blocksize %d bytes. "
1530 "Only pagesize (%ld) or less will currently work.",
1531 mp->m_sb.sb_blocksize, PAGE_SIZE);
1532 error = -ENOSYS;
1533 goto out_free_sb;
1534 }
1535
1536 /* Ensure this filesystem fits in the page cache limits */
1537 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1538 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1539 xfs_warn(mp,
1540 "file system too large to be mounted on this system.");
1541 error = -EFBIG;
1542 goto out_free_sb;
1543 }
1544
1545 /*
Darrick J. Wong932befe2020-01-02 13:20:13 -08001546 * XFS block mappings use 54 bits to store the logical block offset.
1547 * This should suffice to handle the maximum file size that the VFS
1548 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1549 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1550 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1551 * to check this assertion.
1552 *
1553 * Avoid integer overflow by comparing the maximum bmbt offset to the
1554 * maximum pagecache offset in units of fs blocks.
1555 */
Darrick J. Wong33005fd2020-12-04 13:28:35 -08001556 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
Darrick J. Wong932befe2020-01-02 13:20:13 -08001557 xfs_warn(mp,
1558"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1559 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1560 XFS_MAX_FILEOFF);
1561 error = -EINVAL;
1562 goto out_free_sb;
1563 }
1564
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001565 error = xfs_filestream_mount(mp);
1566 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001567 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001568
Dave Chinner704b2902011-03-26 09:14:57 +11001569 /*
 1570	 * We must configure the block size in the superblock before we run the
 1571	 * full mount process, as the mount process can look up and cache inodes.
Dave Chinner704b2902011-03-26 09:14:57 +11001572 */
Adam Borowskidddde682018-10-18 17:20:19 +11001573 sb->s_magic = XFS_SUPER_MAGIC;
Christoph Hellwig4ca488e2007-10-11 18:09:40 +10001574 sb->s_blocksize = mp->m_sb.sb_blocksize;
1575 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
Darrick J. Wong932befe2020-01-02 13:20:13 -08001576 sb->s_maxbytes = MAX_LFS_FILESIZE;
Al Viro8de52772012-02-06 12:45:27 -05001577 sb->s_max_links = XFS_MAXLINK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 sb->s_time_gran = 1;
Dave Chinner38c26bf2021-08-18 18:46:37 -07001579 if (xfs_has_bigtime(mp)) {
Darrick J. Wongf93e54362020-08-17 09:59:07 -07001580 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1581 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1582 } else {
1583 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1584 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1585 }
Darrick J. Wong06dbf822020-08-24 11:58:01 -07001586 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
Christoph Hellwigadfb5fb2019-06-28 19:30:22 -07001587 sb->s_iflags |= SB_I_CGROUPWB;
1588
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 set_posix_acl_flag(sb);
1590
Dave Chinnerdc037ad2013-06-27 16:04:59 +10001591 /* version 5 superblocks support inode version counters. */
Dave Chinnerd6837c12021-08-18 18:46:56 -07001592 if (xfs_has_crc(mp))
Matthew Garrett357fdad2017-10-18 13:56:26 -07001593 sb->s_flags |= SB_I_VERSION;
Dave Chinnerdc037ad2013-06-27 16:04:59 +10001594
Dave Chinner0560f312021-08-18 18:46:52 -07001595 if (xfs_has_dax_always(mp)) {
Dave Jiang80660f22018-05-30 13:03:46 -07001596 bool rtdev_is_dax = false, datadev_is_dax;
Darrick J. Wongba23cba2018-05-30 13:03:45 -07001597
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001598 xfs_warn(mp,
Toshi Kani1e937cd2016-05-10 10:23:56 -06001599 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1600
Christoph Hellwiga384f082021-08-26 15:55:09 +02001601 datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
Darrick J. Wongba23cba2018-05-30 13:03:45 -07001602 if (mp->m_rtdev_targp)
Christoph Hellwiga384f082021-08-26 15:55:09 +02001603 rtdev_is_dax = xfs_buftarg_is_dax(sb,
1604 mp->m_rtdev_targp);
Dave Jiang80660f22018-05-30 13:03:46 -07001605 if (!rtdev_is_dax && !datadev_is_dax) {
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001606 xfs_alert(mp,
Toshi Kani1e937cd2016-05-10 10:23:56 -06001607 "DAX unsupported by block device. Turning off DAX.");
Ira Weiny8d6c3442020-05-04 09:02:42 -07001608 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001609 }
Dave Chinner38c26bf2021-08-18 18:46:37 -07001610 if (xfs_has_reflink(mp)) {
Darrick J. Wonge54b5bf2016-10-03 09:11:52 -07001611 xfs_alert(mp,
Christoph Hellwig1e369b02018-01-08 13:30:08 -08001612 "DAX and reflink cannot be used together!");
Darrick J. Wongb6e03c12018-01-31 14:21:56 -08001613 error = -EINVAL;
1614 goto out_filestream_unmount;
1615 }
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001616 }
1617
Dave Chinner0560f312021-08-18 18:46:52 -07001618 if (xfs_has_discard(mp)) {
Kenjiro Nakayama1e6fa682017-09-18 12:03:56 -07001619 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1620
1621 if (!blk_queue_discard(q)) {
1622 xfs_warn(mp, "mounting with \"discard\" option, but "
1623 "the device does not support discard");
Dave Chinner0560f312021-08-18 18:46:52 -07001624 mp->m_features &= ~XFS_FEAT_DISCARD;
Kenjiro Nakayama1e6fa682017-09-18 12:03:56 -07001625 }
1626 }
1627
Dave Chinner38c26bf2021-08-18 18:46:37 -07001628 if (xfs_has_reflink(mp)) {
Christoph Hellwig66ae56a2019-02-18 09:38:49 -08001629 if (mp->m_sb.sb_rblocks) {
1630 xfs_alert(mp,
Darrick J. Wongc14632d2018-01-31 16:38:18 -08001631 "reflink not compatible with realtime device!");
Christoph Hellwig66ae56a2019-02-18 09:38:49 -08001632 error = -EINVAL;
1633 goto out_filestream_unmount;
1634 }
1635
1636 if (xfs_globals.always_cow) {
1637 xfs_info(mp, "using DEBUG-only always_cow mode.");
1638 mp->m_always_cow = true;
1639 }
Darrick J. Wongc14632d2018-01-31 16:38:18 -08001640 }
1641
Dave Chinner38c26bf2021-08-18 18:46:37 -07001642 if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
Darrick J. Wong1c0607a2016-08-03 12:20:57 +10001643 xfs_alert(mp,
Darrick J. Wong76883f72018-01-31 09:47:25 -08001644 "reverse mapping btree not compatible with realtime device!");
1645 error = -EINVAL;
1646 goto out_filestream_unmount;
Darrick J. Wong738f57c2016-08-26 15:59:19 +10001647 }
Darrick J. Wong1c0607a2016-08-03 12:20:57 +10001648
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001649 error = xfs_mountfs(mp);
Christoph Hellwig2bcf6e92011-07-13 13:43:48 +02001650 if (error)
Dave Chinner7e185302012-10-08 21:56:00 +11001651 goto out_filestream_unmount;
Dave Chinner704b2902011-03-26 09:14:57 +11001652
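	/*
	 * The mount has succeeded; grab a reference to the root inode and
	 * hook it into the dcache for the VFS.
	 */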
David Chinner01651642008-08-13 15:45:15 +10001653 root = igrab(VFS_I(mp->m_rootip));
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001654 if (!root) {
Dave Chinner24513372014-06-25 14:58:08 +10001655 error = -ENOENT;
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001656 goto out_unmount;
Christoph Hellwigcbc89dc2008-02-05 12:14:01 +11001657 }
Al Viro48fde702012-01-08 22:15:13 -05001658 sb->s_root = d_make_root(root);
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001659 if (!sb->s_root) {
Dave Chinner24513372014-06-25 14:58:08 +10001660 error = -ENOMEM;
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001661 goto out_unmount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 }
Christoph Hellwig74394492007-08-30 17:21:22 +10001663
Dave Chinner7e185302012-10-08 21:56:00 +11001664 return 0;
1665
1666 out_filestream_unmount:
Christoph Hellwig120226c2008-05-20 15:11:11 +10001667 xfs_filestream_unmount(mp);
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001668 out_free_sb:
1669 xfs_freesb(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001670 out_free_stats:
1671 free_percpu(mp->m_stats.xs_stats);
Dave Chinnerab23a772021-08-06 11:05:39 -07001672 out_destroy_inodegc:
Dave Chinner0ed17f02021-08-06 11:05:38 -07001673 xfs_mount_list_del(mp);
Dave Chinnerab23a772021-08-06 11:05:39 -07001674 xfs_inodegc_free_percpu(mp);
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001675 out_destroy_counters:
Dave Chinner5681ca42015-02-23 21:22:31 +11001676 xfs_destroy_percpu_counters(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001677 out_destroy_workqueues:
Christoph Hellwigaa6bf012012-02-29 09:53:48 +00001678 xfs_destroy_mount_workqueues(mp);
Christoph Hellwig61ba35d2010-09-30 02:25:54 +00001679 out_close_devices:
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001680 xfs_close_devices(mp);
Ian Kente1d3d212019-11-04 13:58:40 -08001681 out_free_names:
Dave Chinnerc9fbd7b2018-05-10 21:50:23 -07001682 sb->s_fs_info = NULL;
Ian Kenta943f372019-11-04 13:58:42 -08001683 xfs_mount_free(mp);
Dave Chinner24513372014-06-25 14:58:08 +10001684 return error;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001685
Christoph Hellwig2bcf6e92011-07-13 13:43:48 +02001686 out_unmount:
Christoph Hellwige48ad3162008-05-20 11:30:52 +10001687 xfs_filestream_unmount(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001688 xfs_unmountfs(mp);
Christoph Hellwig62033002008-08-13 16:50:21 +10001689 goto out_free_sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690}
1691
Ian Kent73e5fff2019-11-04 13:58:46 -08001692static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001693xfs_fs_get_tree(
Ian Kent73e5fff2019-11-04 13:58:46 -08001694 struct fs_context *fc)
1695{
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001696 return get_tree_bdev(fc, xfs_fs_fill_super);
Ian Kent73e5fff2019-11-04 13:58:46 -08001697}
1698
Ian Kent63cd1e92019-11-04 13:58:47 -08001699static int
1700xfs_remount_rw(
1701 struct xfs_mount *mp)
1702{
1703 struct xfs_sb *sbp = &mp->m_sb;
1704 int error;
1705
Dave Chinner0560f312021-08-18 18:46:52 -07001706 if (xfs_has_norecovery(mp)) {
Ian Kent63cd1e92019-11-04 13:58:47 -08001707 xfs_warn(mp,
1708 "ro->rw transition prohibited on norecovery mount");
1709 return -EINVAL;
1710 }
1711
Dave Chinnerd6837c12021-08-18 18:46:56 -07001712 if (xfs_sb_is_v5(sbp) &&
Ian Kent63cd1e92019-11-04 13:58:47 -08001713 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1714 xfs_warn(mp,
1715 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1716 (sbp->sb_features_ro_compat &
1717 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1718 return -EINVAL;
1719 }
1720
Dave Chinner2e973b22021-08-18 18:46:52 -07001721 clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
Ian Kent63cd1e92019-11-04 13:58:47 -08001722
1723 /*
1724 * If this is the first remount to writeable state we might have some
1725 * superblock changes to update.
1726 */
1727 if (mp->m_update_sb) {
1728 error = xfs_sync_sb(mp, false);
1729 if (error) {
1730 xfs_warn(mp, "failed to write sb changes");
1731 return error;
1732 }
1733 mp->m_update_sb = false;
1734 }
1735
1736 /*
1737 * Fill out the reserve pool if it is empty. Use the stashed value if
1738 * it is non-zero, otherwise go with the default.
1739 */
1740 xfs_restore_resvblks(mp);
1741 xfs_log_work_queue(mp);
1742
1743 /* Recover any CoW blocks that never got remapped. */
1744 error = xfs_reflink_recover_cow(mp);
1745 if (error) {
1746 xfs_err(mp,
1747 "Error %d recovering leftover CoW allocations.", error);
Dan Carpenter7f6bcf72019-11-08 08:06:36 -08001748 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
Ian Kent63cd1e92019-11-04 13:58:47 -08001749 return error;
1750 }
Darrick J. Wongc9a65262021-01-22 16:48:44 -08001751 xfs_blockgc_start(mp);
Ian Kent63cd1e92019-11-04 13:58:47 -08001752
 1753	/* Create the per-AG metadata reservation pool. */
1754 error = xfs_fs_reserve_ag_blocks(mp);
1755 if (error && error != -ENOSPC)
1756 return error;
1757
Dave Chinnerab23a772021-08-06 11:05:39 -07001758 /* Re-enable the background inode inactivation worker. */
1759 xfs_inodegc_start(mp);
1760
Ian Kent63cd1e92019-11-04 13:58:47 -08001761 return 0;
1762}
1763
1764static int
1765xfs_remount_ro(
1766 struct xfs_mount *mp)
1767{
1768 int error;
1769
1770 /*
1771 * Cancel background eofb scanning so it cannot race with the final
1772 * log force+buftarg wait and deadlock the remount.
1773 */
Darrick J. Wongc9a65262021-01-22 16:48:44 -08001774 xfs_blockgc_stop(mp);
Ian Kent63cd1e92019-11-04 13:58:47 -08001775
1776 /* Get rid of any leftover CoW reservations... */
Darrick J. Wongb943c0c2021-01-22 16:48:40 -08001777 error = xfs_blockgc_free_space(mp, NULL);
Ian Kent63cd1e92019-11-04 13:58:47 -08001778 if (error) {
1779 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1780 return error;
1781 }
1782
Dave Chinnerab23a772021-08-06 11:05:39 -07001783 /*
1784 * Stop the inodegc background worker. xfs_fs_reconfigure already
1785 * flushed all pending inodegc work when it sync'd the filesystem.
1786 * The VFS holds s_umount, so we know that inodes cannot enter
1787 * xfs_fs_destroy_inode during a remount operation. In readonly mode
1788 * we send inodes straight to reclaim, so no inodes will be queued.
1789 */
1790 xfs_inodegc_stop(mp);
1791
Ian Kent63cd1e92019-11-04 13:58:47 -08001792 /* Free the per-AG metadata reservation pool. */
1793 error = xfs_fs_unreserve_ag_blocks(mp);
1794 if (error) {
1795 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1796 return error;
1797 }
1798
1799 /*
1800 * Before we sync the metadata, we need to free up the reserve block
1801 * pool so that the used block count in the superblock on disk is
 1802	 * correct at the end of the remount. Stash the current reserve pool
1803 * size so that if we get remounted rw, we can return it to the same
1804 * size.
1805 */
1806 xfs_save_resvblks(mp);
1807
Brian Fosterea2064d2021-01-22 16:48:24 -08001808 xfs_log_clean(mp);
Dave Chinner2e973b22021-08-18 18:46:52 -07001809 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
Ian Kent63cd1e92019-11-04 13:58:47 -08001810
1811 return 0;
1812}
1813
1814/*
1815 * Logically we would return an error here to prevent users from believing
1816 * they might have changed mount options using remount which can't be changed.
1817 *
1818 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1819 * arguments in some cases so we can't blindly reject options, but have to
1820 * check for each specified option if it actually differs from the currently
1821 * set option and only reject it if that's the case.
1822 *
1823 * Until that is implemented we return success for every remount request, and
1824 * silently ignore all options that we can't actually change.
1825 */
1826static int
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001827xfs_fs_reconfigure(
Ian Kent63cd1e92019-11-04 13:58:47 -08001828 struct fs_context *fc)
1829{
1830 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1831 struct xfs_mount *new_mp = fc->s_fs_info;
Ian Kent63cd1e92019-11-04 13:58:47 -08001832 int flags = fc->sb_flags;
1833 int error;
1834
Eric Sandeen4750a172020-07-15 08:30:37 -07001835 /* version 5 superblocks always support version counters. */
Dave Chinnerd6837c12021-08-18 18:46:56 -07001836 if (xfs_has_crc(mp))
Eric Sandeen4750a172020-07-15 08:30:37 -07001837 fc->sb_flags |= SB_I_VERSION;
1838
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001839 error = xfs_fs_validate_params(new_mp);
Ian Kent63cd1e92019-11-04 13:58:47 -08001840 if (error)
1841 return error;
1842
1843 sync_filesystem(mp->m_super);
1844
1845 /* inode32 -> inode64 */
Dave Chinner0560f312021-08-18 18:46:52 -07001846 if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1847 mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
Dave Chinnerd6837c12021-08-18 18:46:56 -07001848 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
Ian Kent63cd1e92019-11-04 13:58:47 -08001849 }
1850
1851 /* inode64 -> inode32 */
Dave Chinner0560f312021-08-18 18:46:52 -07001852 if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1853 mp->m_features |= XFS_FEAT_SMALL_INUMS;
Dave Chinnerd6837c12021-08-18 18:46:56 -07001854 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
Ian Kent63cd1e92019-11-04 13:58:47 -08001855 }
1856
1857 /* ro -> rw */
Dave Chinner2e973b22021-08-18 18:46:52 -07001858 if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
Ian Kent63cd1e92019-11-04 13:58:47 -08001859 error = xfs_remount_rw(mp);
1860 if (error)
1861 return error;
1862 }
1863
1864 /* rw -> ro */
Dave Chinner2e973b22021-08-18 18:46:52 -07001865 if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
Ian Kent63cd1e92019-11-04 13:58:47 -08001866 error = xfs_remount_ro(mp);
1867 if (error)
1868 return error;
1869 }
1870
1871 return 0;
1872}
1873
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001874static void xfs_fs_free(
Ian Kent73e5fff2019-11-04 13:58:46 -08001875 struct fs_context *fc)
1876{
1877 struct xfs_mount *mp = fc->s_fs_info;
1878
1879 /*
1880 * mp is stored in the fs_context when it is initialized.
1881 * mp is transferred to the superblock on a successful mount,
1882 * but if an error occurs before the transfer we have to free
1883 * it here.
1884 */
1885 if (mp)
1886 xfs_mount_free(mp);
1887}
1888
1889static const struct fs_context_operations xfs_context_ops = {
Darrick J. Wong1e5c39d2020-12-04 15:59:39 -08001890 .parse_param = xfs_fs_parse_param,
1891 .get_tree = xfs_fs_get_tree,
1892 .reconfigure = xfs_fs_reconfigure,
1893 .free = xfs_fs_free,
Ian Kent73e5fff2019-11-04 13:58:46 -08001894};
1895
1896static int xfs_init_fs_context(
1897 struct fs_context *fc)
1898{
1899 struct xfs_mount *mp;
1900
Ian Kent50f83002019-11-04 13:58:48 -08001901 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
Ian Kent73e5fff2019-11-04 13:58:46 -08001902 if (!mp)
1903 return -ENOMEM;
1904
Ian Kent50f83002019-11-04 13:58:48 -08001905 spin_lock_init(&mp->m_sb_lock);
1906 spin_lock_init(&mp->m_agirotor_lock);
1907 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1908 spin_lock_init(&mp->m_perag_lock);
1909 mutex_init(&mp->m_growlock);
Darrick J. Wongf0f7a672020-04-12 13:11:10 -07001910 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
Ian Kent50f83002019-11-04 13:58:48 -08001911 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
Ian Kent50f83002019-11-04 13:58:48 -08001912 mp->m_kobj.kobject.kset = xfs_kset;
1913 /*
1914 * We don't create the finobt per-ag space reservation until after log
1915 * recovery, so we must set this to true so that an ifree transaction
1916 * started during log recovery will not depend on space reservations
1917 * for finobt expansion.
1918 */
1919 mp->m_finobt_nores = true;
1920
Ian Kent73e5fff2019-11-04 13:58:46 -08001921 /*
1922 * These can be overridden by the mount option parsing.
1923 */
1924 mp->m_logbufs = -1;
1925 mp->m_logbsize = -1;
1926 mp->m_allocsize_log = 16; /* 64k */
1927
1928 /*
1929 * Copy binary VFS mount flags we are interested in.
1930 */
1931 if (fc->sb_flags & SB_RDONLY)
Dave Chinner2e973b22021-08-18 18:46:52 -07001932 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
Ian Kent73e5fff2019-11-04 13:58:46 -08001933 if (fc->sb_flags & SB_DIRSYNC)
Dave Chinner0560f312021-08-18 18:46:52 -07001934 mp->m_features |= XFS_FEAT_DIRSYNC;
Ian Kent73e5fff2019-11-04 13:58:46 -08001935 if (fc->sb_flags & SB_SYNCHRONOUS)
Dave Chinner0560f312021-08-18 18:46:52 -07001936 mp->m_features |= XFS_FEAT_WSYNC;
Ian Kent73e5fff2019-11-04 13:58:46 -08001937
1938 fc->s_fs_info = mp;
1939 fc->ops = &xfs_context_ops;
1940
1941 return 0;
1942}
1943
Andrew Morton5085b602007-02-20 13:57:47 -08001944static struct file_system_type xfs_fs_type = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 .owner = THIS_MODULE,
1946 .name = "xfs",
Ian Kent73e5fff2019-11-04 13:58:46 -08001947 .init_fs_context = xfs_init_fs_context,
Al Virod7167b12019-09-07 07:23:15 -04001948 .parameters = xfs_fs_parameters,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 .kill_sb = kill_block_super,
Christoph Hellwigf736d932021-01-21 14:19:58 +01001950 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08001952MODULE_ALIAS_FS("xfs");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001954STATIC int __init
Darrick J. Wong182696f2021-10-12 11:09:23 -07001955xfs_init_caches(void)
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001956{
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07001957 int error;
1958
Darrick J. Wong182696f2021-10-12 11:09:23 -07001959 xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001960 sizeof(struct xlog_ticket),
1961 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001962 if (!xfs_log_ticket_cache)
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001963 goto out;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001964
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07001965 error = xfs_btree_init_cur_caches();
1966 if (error)
Darrick J. Wongc201d9c2021-10-12 14:17:01 -07001967 goto out_destroy_log_ticket_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001968
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07001969 error = xfs_defer_init_item_caches();
1970 if (error)
1971 goto out_destroy_btree_cur_cache;
1972
Darrick J. Wong182696f2021-10-12 11:09:23 -07001973 xfs_da_state_cache = kmem_cache_create("xfs_da_state",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001974 sizeof(struct xfs_da_state),
1975 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001976 if (!xfs_da_state_cache)
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07001977 goto out_destroy_defer_item_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001978
Darrick J. Wong182696f2021-10-12 11:09:23 -07001979 xfs_ifork_cache = kmem_cache_create("xfs_ifork",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001980 sizeof(struct xfs_ifork),
1981 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001982 if (!xfs_ifork_cache)
1983 goto out_destroy_da_state_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001984
Darrick J. Wong182696f2021-10-12 11:09:23 -07001985 xfs_trans_cache = kmem_cache_create("xfs_trans",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001986 sizeof(struct xfs_trans),
1987 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07001988 if (!xfs_trans_cache)
1989 goto out_destroy_ifork_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001990
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001992 /*
Darrick J. Wong182696f2021-10-12 11:09:23 -07001993 * The size of the cache-allocated buf log item is the maximum
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001994 * size possible under XFS. This wastes a little bit of memory,
1995 * but it is much faster.
1996 */
Darrick J. Wong182696f2021-10-12 11:09:23 -07001997 xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08001998 sizeof(struct xfs_buf_log_item),
1999 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002000 if (!xfs_buf_item_cache)
2001 goto out_destroy_trans_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002002
Darrick J. Wong182696f2021-10-12 11:09:23 -07002003 xfs_efd_cache = kmem_cache_create("xfs_efd_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002004 (sizeof(struct xfs_efd_log_item) +
2005 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
2006 sizeof(struct xfs_extent)),
2007 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002008 if (!xfs_efd_cache)
2009 goto out_destroy_buf_item_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002010
Darrick J. Wong182696f2021-10-12 11:09:23 -07002011 xfs_efi_cache = kmem_cache_create("xfs_efi_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002012 (sizeof(struct xfs_efi_log_item) +
2013 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
2014 sizeof(struct xfs_extent)),
2015 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002016 if (!xfs_efi_cache)
2017 goto out_destroy_efd_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002018
Darrick J. Wong182696f2021-10-12 11:09:23 -07002019 xfs_inode_cache = kmem_cache_create("xfs_inode",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002020 sizeof(struct xfs_inode), 0,
2021 (SLAB_HWCACHE_ALIGN |
2022 SLAB_RECLAIM_ACCOUNT |
2023 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2024 xfs_fs_inode_init_once);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002025 if (!xfs_inode_cache)
2026 goto out_destroy_efi_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002027
Darrick J. Wong182696f2021-10-12 11:09:23 -07002028 xfs_ili_cache = kmem_cache_create("xfs_ili",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002029 sizeof(struct xfs_inode_log_item), 0,
Dave Chinnerd59eada2020-03-24 20:10:28 -07002030 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2031 NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002032 if (!xfs_ili_cache)
2033 goto out_destroy_inode_cache;
Carlos Maiolinob1231762019-11-14 12:43:03 -08002034
Darrick J. Wong182696f2021-10-12 11:09:23 -07002035 xfs_icreate_cache = kmem_cache_create("xfs_icr",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002036 sizeof(struct xfs_icreate_item),
2037 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002038 if (!xfs_icreate_cache)
2039 goto out_destroy_ili_cache;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002040
Darrick J. Wong182696f2021-10-12 11:09:23 -07002041 xfs_rud_cache = kmem_cache_create("xfs_rud_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002042 sizeof(struct xfs_rud_log_item),
2043 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002044 if (!xfs_rud_cache)
2045 goto out_destroy_icreate_cache;
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10002046
Darrick J. Wong182696f2021-10-12 11:09:23 -07002047 xfs_rui_cache = kmem_cache_create("xfs_rui_item",
Darrick J. Wongcd001582016-09-19 10:24:27 +10002048 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08002049 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002050 if (!xfs_rui_cache)
2051 goto out_destroy_rud_cache;
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10002052
Darrick J. Wong182696f2021-10-12 11:09:23 -07002053 xfs_cud_cache = kmem_cache_create("xfs_cud_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002054 sizeof(struct xfs_cud_log_item),
2055 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002056 if (!xfs_cud_cache)
2057 goto out_destroy_rui_cache;
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07002058
Darrick J. Wong182696f2021-10-12 11:09:23 -07002059 xfs_cui_cache = kmem_cache_create("xfs_cui_item",
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07002060 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08002061 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002062 if (!xfs_cui_cache)
2063 goto out_destroy_cud_cache;
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07002064
Darrick J. Wong182696f2021-10-12 11:09:23 -07002065 xfs_bud_cache = kmem_cache_create("xfs_bud_item",
Carlos Maiolinob1231762019-11-14 12:43:03 -08002066 sizeof(struct xfs_bud_log_item),
2067 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002068 if (!xfs_bud_cache)
2069 goto out_destroy_cui_cache;
Darrick J. Wong6413a012016-10-03 09:11:25 -07002070
Darrick J. Wong182696f2021-10-12 11:09:23 -07002071 xfs_bui_cache = kmem_cache_create("xfs_bui_item",
Darrick J. Wong6413a012016-10-03 09:11:25 -07002072 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08002073 0, 0, NULL);
Darrick J. Wong182696f2021-10-12 11:09:23 -07002074 if (!xfs_bui_cache)
2075 goto out_destroy_bud_cache;
Darrick J. Wong6413a012016-10-03 09:11:25 -07002076
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002077 return 0;
2078
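 /* Error unwinding: destroy the caches in the reverse order of creation. */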
Darrick J. Wong182696f2021-10-12 11:09:23 -07002079 out_destroy_bud_cache:
2080 kmem_cache_destroy(xfs_bud_cache);
2081 out_destroy_cui_cache:
2082 kmem_cache_destroy(xfs_cui_cache);
2083 out_destroy_cud_cache:
2084 kmem_cache_destroy(xfs_cud_cache);
2085 out_destroy_rui_cache:
2086 kmem_cache_destroy(xfs_rui_cache);
2087 out_destroy_rud_cache:
2088 kmem_cache_destroy(xfs_rud_cache);
2089 out_destroy_icreate_cache:
2090 kmem_cache_destroy(xfs_icreate_cache);
2091 out_destroy_ili_cache:
2092 kmem_cache_destroy(xfs_ili_cache);
2093 out_destroy_inode_cache:
2094 kmem_cache_destroy(xfs_inode_cache);
2095 out_destroy_efi_cache:
2096 kmem_cache_destroy(xfs_efi_cache);
2097 out_destroy_efd_cache:
2098 kmem_cache_destroy(xfs_efd_cache);
2099 out_destroy_buf_item_cache:
2100 kmem_cache_destroy(xfs_buf_item_cache);
2101 out_destroy_trans_cache:
2102 kmem_cache_destroy(xfs_trans_cache);
2103 out_destroy_ifork_cache:
2104 kmem_cache_destroy(xfs_ifork_cache);
2105 out_destroy_da_state_cache:
2106 kmem_cache_destroy(xfs_da_state_cache);
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07002107 out_destroy_defer_item_cache:
2108 xfs_defer_destroy_item_caches();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002109 out_destroy_btree_cur_cache:
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07002110 xfs_btree_destroy_cur_caches();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002111 out_destroy_log_ticket_cache:
2112 kmem_cache_destroy(xfs_log_ticket_cache);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002113 out:
2114 return -ENOMEM;
2115}
2116
2117STATIC void
Darrick J. Wong182696f2021-10-12 11:09:23 -07002118xfs_destroy_caches(void)
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002119{
Kirill A. Shutemov8c0a8532012-09-26 11:33:07 +10002120 /*
 2121	 * Make sure all delayed RCU frees are flushed before we
2122 * destroy caches.
2123 */
2124 rcu_barrier();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002125 kmem_cache_destroy(xfs_bui_cache);
2126 kmem_cache_destroy(xfs_bud_cache);
2127 kmem_cache_destroy(xfs_cui_cache);
2128 kmem_cache_destroy(xfs_cud_cache);
2129 kmem_cache_destroy(xfs_rui_cache);
2130 kmem_cache_destroy(xfs_rud_cache);
2131 kmem_cache_destroy(xfs_icreate_cache);
2132 kmem_cache_destroy(xfs_ili_cache);
2133 kmem_cache_destroy(xfs_inode_cache);
2134 kmem_cache_destroy(xfs_efi_cache);
2135 kmem_cache_destroy(xfs_efd_cache);
2136 kmem_cache_destroy(xfs_buf_item_cache);
2137 kmem_cache_destroy(xfs_trans_cache);
2138 kmem_cache_destroy(xfs_ifork_cache);
2139 kmem_cache_destroy(xfs_da_state_cache);
Darrick J. Wongf3c799c2021-10-12 14:11:01 -07002140 xfs_defer_destroy_item_caches();
Darrick J. Wong9fa47bd2021-09-23 12:21:37 -07002141 xfs_btree_destroy_cur_caches();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002142 kmem_cache_destroy(xfs_log_ticket_cache);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002143}
2144
2145STATIC int __init
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002146xfs_init_workqueues(void)
2147{
2148 /*
Dave Chinnerc999a222012-03-22 05:15:07 +00002149 * The allocation workqueue can be used in memory reclaim situations
2150 * (writepage path), and parallelism is only limited by the number of
2151 * AGs in all the filesystems mounted. Hence use the default large
2152 * max_active value for this workqueue.
2153 */
Brian Foster8018ec02014-09-09 11:44:46 +10002154 xfs_alloc_wq = alloc_workqueue("xfsalloc",
Darrick J. Wong05a302a2021-01-22 16:48:42 -08002155 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
Dave Chinnerc999a222012-03-22 05:15:07 +00002156 if (!xfs_alloc_wq)
Dave Chinner58896082012-10-08 21:56:05 +11002157 return -ENOMEM;
Dave Chinnerc999a222012-03-22 05:15:07 +00002158
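	/*
	 * The discard workqueue issues asynchronous discards of freed
	 * extents so callers never wait on the device to complete them.
	 */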
Darrick J. Wong05a302a2021-01-22 16:48:42 -08002159 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2160 0);
Christoph Hellwig4560e782017-02-07 14:07:58 -08002161 if (!xfs_discard_wq)
2162 goto out_free_alloc_wq;
2163
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002164 return 0;
Christoph Hellwig4560e782017-02-07 14:07:58 -08002165out_free_alloc_wq:
2166 destroy_workqueue(xfs_alloc_wq);
2167 return -ENOMEM;
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002168}
2169
Luck, Tony39411f82011-04-11 12:06:12 -07002170STATIC void
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002171xfs_destroy_workqueues(void)
2172{
Christoph Hellwig4560e782017-02-07 14:07:58 -08002173 destroy_workqueue(xfs_discard_wq);
Dave Chinnerc999a222012-03-22 05:15:07 +00002174 destroy_workqueue(xfs_alloc_wq);
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002175}
2176
Dave Chinnerf1653c22021-08-06 11:05:37 -07002177#ifdef CONFIG_HOTPLUG_CPU
2178static int
2179xfs_cpu_dead(
2180 unsigned int cpu)
2181{
Dave Chinner0ed17f02021-08-06 11:05:38 -07002182 struct xfs_mount *mp, *n;
2183
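	/*
	 * Drop the list lock around each per-mount notification so the
	 * callback is free to block; the _safe iterator lets the walk
	 * continue from the saved next entry.
	 */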
2184 spin_lock(&xfs_mount_list_lock);
2185 list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2186 spin_unlock(&xfs_mount_list_lock);
Dave Chinnerab23a772021-08-06 11:05:39 -07002187 xfs_inodegc_cpu_dead(mp, cpu);
Dave Chinner0ed17f02021-08-06 11:05:38 -07002188 spin_lock(&xfs_mount_list_lock);
2189 }
2190 spin_unlock(&xfs_mount_list_lock);
Dave Chinnerf1653c22021-08-06 11:05:37 -07002191 return 0;
2192}
2193
2194static int __init
2195xfs_cpu_hotplug_init(void)
2196{
2197 int error;
2198
2199 error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2200 xfs_cpu_dead);
2201 if (error < 0)
2202 xfs_alert(NULL,
2203"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2204 error);
2205 return error;
2206}
2207
2208static void
2209xfs_cpu_hotplug_destroy(void)
2210{
2211 cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2212}
2213
2214#else /* !CONFIG_HOTPLUG_CPU */
2215static inline int xfs_cpu_hotplug_init(void) { return 0; }
2216static inline void xfs_cpu_hotplug_destroy(void) {}
2217#endif
2218
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002219STATIC int __init
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002220init_xfs_fs(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
2222 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
Darrick J. Wong30cbc592016-03-09 08:15:14 +11002224 xfs_check_ondisk_structs();
2225
Christoph Hellwig65795912008-11-28 14:23:33 +11002226 printk(KERN_INFO XFS_VERSION_STRING " with "
2227 XFS_BUILD_OPTIONS " enabled\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002229 xfs_dir_startup();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230
Dave Chinnerf1653c22021-08-06 11:05:37 -07002231 error = xfs_cpu_hotplug_init();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002232 if (error)
2233 goto out;
2234
Darrick J. Wong182696f2021-10-12 11:09:23 -07002235 error = xfs_init_caches();
Dave Chinnerf1653c22021-08-06 11:05:37 -07002236 if (error)
2237 goto out_destroy_hp;
2238
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002239 error = xfs_init_workqueues();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002240 if (error)
Darrick J. Wong182696f2021-10-12 11:09:23 -07002241 goto out_destroy_caches;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002242
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002243 error = xfs_mru_cache_init();
2244 if (error)
2245 goto out_destroy_wq;
2246
Nathan Scottce8e9222006-01-11 15:39:08 +11002247 error = xfs_buf_init();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002248 if (error)
Christoph Hellwig1919add2014-04-23 07:11:51 +10002249 goto out_mru_cache_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002251 error = xfs_init_procfs();
2252 if (error)
2253 goto out_buf_terminate;
2254
2255 error = xfs_sysctl_register();
2256 if (error)
2257 goto out_cleanup_procfs;
2258
Brian Foster3d871222014-07-15 07:41:37 +10002259 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2260 if (!xfs_kset) {
2261 error = -ENOMEM;
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002262 goto out_sysctl_unregister;
Brian Foster3d871222014-07-15 07:41:37 +10002263 }
2264
Bill O'Donnell80529c42015-10-12 05:19:45 +11002265 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2266
2267 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2268 if (!xfsstats.xs_stats) {
2269 error = -ENOMEM;
2270 goto out_kset_unregister;
2271 }
2272
2273 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002274 "stats");
2275 if (error)
Bill O'Donnell80529c42015-10-12 05:19:45 +11002276 goto out_free_stats;
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002277
Brian Foster65b65732014-09-09 11:52:42 +10002278#ifdef DEBUG
2279 xfs_dbg_kobj.kobject.kset = xfs_kset;
2280 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002281 if (error)
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002282 goto out_remove_stats_kobj;
Brian Foster65b65732014-09-09 11:52:42 +10002283#endif
2284
2285 error = xfs_qm_init();
2286 if (error)
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002287 goto out_remove_dbg_kobj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
2289 error = register_filesystem(&xfs_fs_type);
2290 if (error)
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002291 goto out_qm_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 return 0;
2293
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002294 out_qm_exit:
2295 xfs_qm_exit();
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002296 out_remove_dbg_kobj:
Brian Foster65b65732014-09-09 11:52:42 +10002297#ifdef DEBUG
2298 xfs_sysfs_del(&xfs_dbg_kobj);
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002299 out_remove_stats_kobj:
Brian Foster65b65732014-09-09 11:52:42 +10002300#endif
Bill O'Donnell80529c42015-10-12 05:19:45 +11002301 xfs_sysfs_del(&xfsstats.xs_kobj);
2302 out_free_stats:
2303 free_percpu(xfsstats.xs_stats);
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002304 out_kset_unregister:
Brian Foster3d871222014-07-15 07:41:37 +10002305 kset_unregister(xfs_kset);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002306 out_sysctl_unregister:
2307 xfs_sysctl_unregister();
2308 out_cleanup_procfs:
2309 xfs_cleanup_procfs();
2310 out_buf_terminate:
Nathan Scottce8e9222006-01-11 15:39:08 +11002311 xfs_buf_terminate();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002312 out_mru_cache_uninit:
2313 xfs_mru_cache_uninit();
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002314 out_destroy_wq:
2315 xfs_destroy_workqueues();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002316 out_destroy_caches:
2317 xfs_destroy_caches();
Dave Chinnerf1653c22021-08-06 11:05:37 -07002318 out_destroy_hp:
2319 xfs_cpu_hotplug_destroy();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002320 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 return error;
2322}
2323
2324STATIC void __exit
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002325exit_xfs_fs(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326{
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002327 xfs_qm_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 unregister_filesystem(&xfs_fs_type);
Brian Foster65b65732014-09-09 11:52:42 +10002329#ifdef DEBUG
2330 xfs_sysfs_del(&xfs_dbg_kobj);
2331#endif
Bill O'Donnell80529c42015-10-12 05:19:45 +11002332 xfs_sysfs_del(&xfsstats.xs_kobj);
2333 free_percpu(xfsstats.xs_stats);
Brian Foster3d871222014-07-15 07:41:37 +10002334 kset_unregister(xfs_kset);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002335 xfs_sysctl_unregister();
2336 xfs_cleanup_procfs();
Nathan Scottce8e9222006-01-11 15:39:08 +11002337 xfs_buf_terminate();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002338 xfs_mru_cache_uninit();
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002339 xfs_destroy_workqueues();
Darrick J. Wong182696f2021-10-12 11:09:23 -07002340 xfs_destroy_caches();
Darrick J. Wongaf3b6382015-11-03 13:06:34 +11002341 xfs_uuid_table_free();
Dave Chinnerf1653c22021-08-06 11:05:37 -07002342 xfs_cpu_hotplug_destroy();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343}
2344
2345module_init(init_xfs_fs);
2346module_exit(exit_xfs_fs);
2347
2348MODULE_AUTHOR("Silicon Graphics, Inc.");
2349MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2350MODULE_LICENSE("GPL");