// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;           /* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;    /* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
        Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
        Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
        Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
        Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
        Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
        Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
        Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
        Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
        Opt_discard, Opt_nodiscard, Opt_dax,
};

static const struct fs_parameter_spec xfs_param_specs[] = {
        fsparam_u32("logbufs",          Opt_logbufs),
        fsparam_string("logbsize",      Opt_logbsize),
        fsparam_string("logdev",        Opt_logdev),
        fsparam_string("rtdev",         Opt_rtdev),
        fsparam_flag("wsync",           Opt_wsync),
        fsparam_flag("noalign",         Opt_noalign),
        fsparam_flag("swalloc",         Opt_swalloc),
        fsparam_u32("sunit",            Opt_sunit),
        fsparam_u32("swidth",           Opt_swidth),
        fsparam_flag("nouuid",          Opt_nouuid),
        fsparam_flag("grpid",           Opt_grpid),
        fsparam_flag("nogrpid",         Opt_nogrpid),
        fsparam_flag("bsdgroups",       Opt_bsdgroups),
        fsparam_flag("sysvgroups",      Opt_sysvgroups),
        fsparam_string("allocsize",     Opt_allocsize),
        fsparam_flag("norecovery",      Opt_norecovery),
        fsparam_flag("inode64",         Opt_inode64),
        fsparam_flag("inode32",         Opt_inode32),
        fsparam_flag("ikeep",           Opt_ikeep),
        fsparam_flag("noikeep",         Opt_noikeep),
        fsparam_flag("largeio",         Opt_largeio),
        fsparam_flag("nolargeio",       Opt_nolargeio),
        fsparam_flag("attr2",           Opt_attr2),
        fsparam_flag("noattr2",         Opt_noattr2),
        fsparam_flag("filestreams",     Opt_filestreams),
        fsparam_flag("quota",           Opt_quota),
        fsparam_flag("noquota",         Opt_noquota),
        fsparam_flag("usrquota",        Opt_usrquota),
        fsparam_flag("grpquota",        Opt_grpquota),
        fsparam_flag("prjquota",        Opt_prjquota),
        fsparam_flag("uquota",          Opt_uquota),
        fsparam_flag("gquota",          Opt_gquota),
        fsparam_flag("pquota",          Opt_pquota),
        fsparam_flag("uqnoenforce",     Opt_uqnoenforce),
        fsparam_flag("gqnoenforce",     Opt_gqnoenforce),
        fsparam_flag("pqnoenforce",     Opt_pqnoenforce),
        fsparam_flag("qnoenforce",      Opt_qnoenforce),
        fsparam_flag("discard",         Opt_discard),
        fsparam_flag("nodiscard",       Opt_nodiscard),
        fsparam_flag("dax",             Opt_dax),
        {}
};

static const struct fs_parameter_description xfs_fs_parameters = {
        .name           = "xfs",
        .specs          = xfs_param_specs,
};

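/*
 * Illustrative note (the option string is hypothetical): for a mount
 * like "mount -o logbufs=8,noalign", each option arrives as a separate
 * fs_parameter.  fs_parse() matches "logbufs" against the fsparam_u32()
 * entry above and returns Opt_logbufs with result.uint_32 == 8, while
 * the flag-style "noalign" simply returns Opt_noalign.  The switch in
 * xfs_fc_parse_param() below translates each code into mount state.
 */
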
struct proc_xfs_info {
        uint64_t        flag;
        char            *str;
};

static int
xfs_fs_show_options(
        struct seq_file         *m,
        struct dentry           *root)
{
        static struct proc_xfs_info xfs_info_set[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_IKEEP,              ",ikeep" },
                { XFS_MOUNT_WSYNC,              ",wsync" },
                { XFS_MOUNT_NOALIGN,            ",noalign" },
                { XFS_MOUNT_SWALLOC,            ",swalloc" },
                { XFS_MOUNT_NOUUID,             ",nouuid" },
                { XFS_MOUNT_NORECOVERY,         ",norecovery" },
                { XFS_MOUNT_ATTR2,              ",attr2" },
                { XFS_MOUNT_FILESTREAMS,        ",filestreams" },
                { XFS_MOUNT_GRPID,              ",grpid" },
                { XFS_MOUNT_DISCARD,            ",discard" },
                { XFS_MOUNT_LARGEIO,            ",largeio" },
                { XFS_MOUNT_DAX,                ",dax" },
                { 0, NULL }
        };
        struct xfs_mount        *mp = XFS_M(root->d_sb);
        struct proc_xfs_info    *xfs_infop;

        for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
                if (mp->m_flags & xfs_infop->flag)
                        seq_puts(m, xfs_infop->str);
        }

        seq_printf(m, ",inode%d",
                (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

        if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
                seq_printf(m, ",allocsize=%dk",
                           (1 << mp->m_allocsize_log) >> 10);

        if (mp->m_logbufs > 0)
                seq_printf(m, ",logbufs=%d", mp->m_logbufs);
        if (mp->m_logbsize > 0)
                seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

        if (mp->m_logname)
                seq_show_option(m, "logdev", mp->m_logname);
        if (mp->m_rtname)
                seq_show_option(m, "rtdev", mp->m_rtname);

        if (mp->m_dalign > 0)
                seq_printf(m, ",sunit=%d",
                        (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
        if (mp->m_swidth > 0)
                seq_printf(m, ",swidth=%d",
                        (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

        if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
                seq_puts(m, ",usrquota");
        else if (mp->m_qflags & XFS_UQUOTA_ACCT)
                seq_puts(m, ",uqnoenforce");

        if (mp->m_qflags & XFS_PQUOTA_ACCT) {
                if (mp->m_qflags & XFS_PQUOTA_ENFD)
                        seq_puts(m, ",prjquota");
                else
                        seq_puts(m, ",pqnoenforce");
        }
        if (mp->m_qflags & XFS_GQUOTA_ACCT) {
                if (mp->m_qflags & XFS_GQUOTA_ENFD)
                        seq_puts(m, ",grpquota");
                else
                        seq_puts(m, ",gqnoenforce");
        }

        if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
                seq_puts(m, ",noquota");

        return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
        struct xfs_mount *mp,
        xfs_agnumber_t  agcount)
{
        xfs_agnumber_t  index;
        xfs_agnumber_t  maxagi = 0;
        xfs_sb_t        *sbp = &mp->m_sb;
        xfs_agnumber_t  max_metadata;
        xfs_agino_t     agino;
        xfs_ino_t       ino;

        /*
         * Calculate how much should be reserved for inodes to meet
         * the max inode percentage.  Used only for inode32.
         */
        if (M_IGEO(mp)->maxicount) {
                uint64_t        icount;

                icount = sbp->sb_dblocks * sbp->sb_imax_pct;
                do_div(icount, 100);
                icount += sbp->sb_agblocks - 1;
                do_div(icount, sbp->sb_agblocks);
                max_metadata = icount;
        } else {
                max_metadata = agcount;
        }

        /* Get the last possible inode in the filesystem */
        agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
        ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

        /*
         * If user asked for no more than 32-bit inodes, and the fs is
         * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
         * the allocator to accommodate the request.
         */
        if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
                mp->m_flags |= XFS_MOUNT_32BITINODES;
        else
                mp->m_flags &= ~XFS_MOUNT_32BITINODES;

        for (index = 0; index < agcount; index++) {
                struct xfs_perag        *pag;

                ino = XFS_AGINO_TO_INO(mp, index, agino);

                pag = xfs_perag_get(mp, index);

                if (mp->m_flags & XFS_MOUNT_32BITINODES) {
                        if (ino > XFS_MAXINUMBER_32) {
                                pag->pagi_inodeok = 0;
                                pag->pagf_metadata = 0;
                        } else {
                                pag->pagi_inodeok = 1;
                                maxagi++;
                                if (index < max_metadata)
                                        pag->pagf_metadata = 1;
                                else
                                        pag->pagf_metadata = 0;
                        }
                } else {
                        pag->pagi_inodeok = 1;
                        pag->pagf_metadata = 0;
                }

                xfs_perag_put(pag);
        }

        return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}

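/*
 * Hypothetical example of the heuristic above: if only the first two of
 * sixteen AGs can yield inode numbers at or below XFS_MAXINUMBER_32, an
 * inode32 mount leaves pagi_inodeok set for just those two AGs (marking
 * those below the imax_pct reservation as metadata-preferred) and
 * returns a maxagi of 2; an inode64 mount returns the full agcount, 16.
 */
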
STATIC int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                    mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
        }

        return error;
}

STATIC void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}

STATIC void
xfs_close_devices(
        struct xfs_mount        *mp)
{
        struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
                struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

                xfs_free_buftarg(mp->m_logdev_targp);
                xfs_blkdev_put(logdev);
                fs_put_dax(dax_logdev);
        }
        if (mp->m_rtdev_targp) {
                struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
                struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

                xfs_free_buftarg(mp->m_rtdev_targp);
                xfs_blkdev_put(rtdev);
                fs_put_dax(dax_rtdev);
        }
        xfs_free_buftarg(mp->m_ddev_targp);
        fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *      (1) device (partition) with data and internal log
 *      (2) logical volume with data and log subvolumes.
 *      (3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
        struct xfs_mount        *mp)
{
        struct block_device     *ddev = mp->m_super->s_bdev;
        struct dax_device       *dax_ddev = fs_dax_get_by_bdev(ddev);
        struct dax_device       *dax_logdev = NULL, *dax_rtdev = NULL;
        struct block_device     *logdev = NULL, *rtdev = NULL;
        int                     error;

        /*
         * Open real time and log devices - order is important.
         */
        if (mp->m_logname) {
                error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
                if (error)
                        goto out;
                dax_logdev = fs_dax_get_by_bdev(logdev);
        }

        if (mp->m_rtname) {
                error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
                if (error)
                        goto out_close_logdev;

                if (rtdev == ddev || rtdev == logdev) {
                        xfs_warn(mp,
        "Cannot mount filesystem with identical rtdev and ddev/logdev.");
                        error = -EINVAL;
                        goto out_close_rtdev;
                }
                dax_rtdev = fs_dax_get_by_bdev(rtdev);
        }

        /*
         * Setup xfs_mount buffer target pointers
         */
        error = -ENOMEM;
        mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
        if (!mp->m_ddev_targp)
                goto out_close_rtdev;

        if (rtdev) {
                mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
                if (!mp->m_rtdev_targp)
                        goto out_free_ddev_targ;
        }

        if (logdev && logdev != ddev) {
                mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
                if (!mp->m_logdev_targp)
                        goto out_free_rtdev_targ;
        } else {
                mp->m_logdev_targp = mp->m_ddev_targp;
        }

        return 0;

 out_free_rtdev_targ:
        if (mp->m_rtdev_targp)
                xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
        xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
        xfs_blkdev_put(rtdev);
        fs_put_dax(dax_rtdev);
 out_close_logdev:
        if (logdev && logdev != ddev) {
                xfs_blkdev_put(logdev);
                fs_put_dax(dax_logdev);
        }
 out:
        fs_put_dax(dax_ddev);
        return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
        struct xfs_mount        *mp)
{
        int                     error;

        error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
        if (error)
                return error;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                unsigned int    log_sector_size = BBSIZE;

                if (xfs_sb_version_hassector(&mp->m_sb))
                        log_sector_size = mp->m_sb.sb_logsectsize;
                error = xfs_setsize_buftarg(mp->m_logdev_targp,
                                            log_sector_size);
                if (error)
                        return error;
        }
        if (mp->m_rtdev_targp) {
                error = xfs_setsize_buftarg(mp->m_rtdev_targp,
                                            mp->m_sb.sb_sectsize);
                if (error)
                        return error;
        }

        return 0;
}

STATIC int
xfs_init_mount_workqueues(
        struct xfs_mount        *mp)
{
        mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
        if (!mp->m_buf_workqueue)
                goto out;

        mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_unwritten_workqueue)
                goto out_destroy_buf;

        mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
                        WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
                        0, mp->m_super->s_id);
        if (!mp->m_cil_workqueue)
                goto out_destroy_unwritten;

        mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_reclaim_workqueue)
                goto out_destroy_cil;

        mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_eofblocks_workqueue)
                goto out_destroy_reclaim;

        mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
                                               mp->m_super->s_id);
        if (!mp->m_sync_workqueue)
                goto out_destroy_eofb;

        return 0;

out_destroy_eofb:
        destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
        destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
        destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
        destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
        destroy_workqueue(mp->m_buf_workqueue);
out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
        struct xfs_mount        *mp)
{
        destroy_workqueue(mp->m_sync_workqueue);
        destroy_workqueue(mp->m_eofblocks_workqueue);
        destroy_workqueue(mp->m_reclaim_workqueue);
        destroy_workqueue(mp->m_cil_workqueue);
        destroy_workqueue(mp->m_unwritten_workqueue);
        destroy_workqueue(mp->m_buf_workqueue);
}

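/*
 * Naming note (the device name is hypothetical): the "%s" in each
 * workqueue name above is filled from the superblock id, so on a mount
 * of /dev/sda1 the queues appear as "xfs-buf/sda1", "xfs-conv/sda1",
 * and so on in the workqueue debug interfaces.
 */
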
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
        struct xfs_mount        *mp)
{
        struct super_block      *sb = mp->m_super;

        if (down_read_trylock(&sb->s_umount)) {
                sync_inodes_sb(sb);
                up_read(&sb->s_umount);
        }
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        BUG();
        return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
        struct xfs_inode        *ip,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;

        if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
                return;
        do {
                if (isnullstartblock(got.br_startblock)) {
                        xfs_warn(ip->i_mount,
        "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
                                ip->i_ino,
                                whichfork == XFS_DATA_FORK ? "data" : "cow",
                                got.br_startoff, got.br_blockcount);
                }
        } while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)       do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        trace_xfs_destroy_inode(ip);

        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        XFS_STATS_INC(ip->i_mount, vn_rele);
        XFS_STATS_INC(ip->i_mount, vn_remove);

        xfs_inactive(ip);

        if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
                xfs_check_delalloc(ip, XFS_DATA_FORK);
                xfs_check_delalloc(ip, XFS_COW_FORK);
                ASSERT(0);
        }

        XFS_STATS_INC(ip->i_mount, vn_reclaim);

        /*
         * We should never get here with one of the reclaim flags already set.
         */
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

        /*
         * We always use background reclaim here because even if the
         * inode is clean, it still may be under IO and hence we have
         * to take the flush lock. The background reclaim path handles
         * this more efficiently than we can here, so simply let background
         * reclaim tear down all inodes.
         */
        xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
        struct inode                    *inode,
        int                             flag)
{
        struct xfs_inode                *ip = XFS_I(inode);
        struct xfs_mount                *mp = ip->i_mount;
        struct xfs_trans                *tp;

        if (!(inode->i_sb->s_flags & SB_LAZYTIME))
                return;
        if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
                return;

        if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
                return;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
        xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
        void                    *inode)
{
        struct xfs_inode        *ip = inode;

        memset(ip, 0, sizeof(struct xfs_inode));

        /* vfs inode */
        inode_init_once(VFS_I(ip));

        /* xfs inode */
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);

        mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        /*
         * If this unlinked inode is in the middle of recovery, don't
         * drop the inode just yet; log recovery will take care of
         * that. See the comment for this inode flag.
         */
        if (ip->i_flags & XFS_IRECOVERY) {
                ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
                return 0;
        }

        return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

static void
xfs_mount_free(
        struct xfs_mount        *mp)
{
        kfree(mp->m_rtname);
        kfree(mp->m_logname);
        kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
        struct super_block      *sb,
        int                     wait)
{
        struct xfs_mount        *mp = XFS_M(sb);

        /*
         * Doing anything during the async pass would be counterproductive.
         */
        if (!wait)
                return 0;

        xfs_log_force(mp, XFS_LOG_SYNC);
        if (laptop_mode) {
                /*
                 * The disk must be active because we're syncing.
                 * We schedule log work now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                flush_delayed_work(&mp->m_log->l_work);
        }

        return 0;
}

STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        struct xfs_mount        *mp = XFS_M(dentry->d_sb);
        xfs_sb_t                *sbp = &mp->m_sb;
        struct xfs_inode        *ip = XFS_I(d_inode(dentry));
        uint64_t                fakeinos, id;
        uint64_t                icount;
        uint64_t                ifree;
        uint64_t                fdblocks;
        xfs_extlen_t            lsize;
        int64_t                 ffree;

        statp->f_type = XFS_SUPER_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;

        id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);

        icount = percpu_counter_sum(&mp->m_icount);
        ifree = percpu_counter_sum(&mp->m_ifree);
        fdblocks = percpu_counter_sum(&mp->m_fdblocks);

        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
        lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
        statp->f_blocks = sbp->sb_dblocks - lsize;
        spin_unlock(&mp->m_sb_lock);

        statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
        statp->f_bavail = statp->f_bfree;

        fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
        statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
        if (M_IGEO(mp)->maxicount)
                statp->f_files = min_t(typeof(statp->f_files),
                                        statp->f_files,
                                        M_IGEO(mp)->maxicount);

        /* If sb_icount overshot maxicount, report actual allocation */
        statp->f_files = max_t(typeof(statp->f_files),
                                        statp->f_files,
                                        sbp->sb_icount);

        /* make sure statp->f_ffree does not underflow */
        ffree = statp->f_files - (icount - ifree);
        statp->f_ffree = max_t(int64_t, ffree, 0);

        if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
            ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
                              (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
                xfs_qm_statvfs(ip, statp);

        if (XFS_IS_REALTIME_MOUNT(mp) &&
            (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
                statp->f_blocks = sbp->sb_rblocks;
                statp->f_bavail = statp->f_bfree =
                        sbp->sb_frextents * sbp->sb_rextsize;
        }

        return 0;
}

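/*
 * Sketch of the f_files arithmetic above (the numbers are hypothetical):
 * with 1,000 inodes allocated (icount), enough free space for 9,000 more
 * (fakeinos) and an imax_pct cap (maxicount) of 5,000, statfs reports
 * f_files = max(min(1000 + 9000, 5000), 1000) = 5000, and f_ffree is
 * that figure minus the inodes currently in use.
 */
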
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
        uint64_t resblks = 0;

        mp->m_resblks_save = mp->m_resblks;
        xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
        uint64_t resblks;

        if (mp->m_resblks_save) {
                resblks = mp->m_resblks_save;
                mp->m_resblks_save = 0;
        } else
                resblks = xfs_default_resblks(mp);

        xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
        struct xfs_mount        *mp)
{
        int     error = 0;

        /* wait for all modifications to complete */
        while (atomic_read(&mp->m_active_trans) > 0)
                delay(100);

        /* force the log to unpin objects from the now complete transactions */
        xfs_log_force(mp, XFS_LOG_SYNC);

        /* reclaim inodes to do any IO before the freeze completes */
        xfs_reclaim_inodes(mp, 0);
        xfs_reclaim_inodes(mp, SYNC_WAIT);

        /* Push the superblock and write an unmount record */
        error = xfs_log_sbcount(mp);
        if (error)
                xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
                                "Frozen image may not be consistent.");
        /*
         * Just warn here till VFS can correctly support
         * read-only remount without racing.
         */
        WARN_ON(atomic_read(&mp->m_active_trans) != 0);

        xfs_log_quiesce(mp);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_stop_block_reaping(mp);
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
        return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_restore_resvblks(mp);
        xfs_log_work_queue(mp);
        xfs_start_block_reaping(mp);
        return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
        struct xfs_mount        *mp)
{
        int                     ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

        /* Fail a mount where the logbuf is smaller than the log stripe */
        if (xfs_sb_version_haslogv2(&mp->m_sb)) {
                if (mp->m_logbsize <= 0 &&
                    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
                        mp->m_logbsize = mp->m_sb.sb_logsunit;
                } else if (mp->m_logbsize > 0 &&
                           mp->m_logbsize < mp->m_sb.sb_logsunit) {
                        xfs_warn(mp,
                "logbuf size must be greater than or equal to log stripe size");
                        return -EINVAL;
                }
        } else {
                /* Fail a mount if the logbuf is larger than 32K */
                if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
                        xfs_warn(mp,
                "logbuf size for version 1 logs must be 16K or 32K");
                        return -EINVAL;
                }
        }

        /*
         * V5 filesystems always use attr2 format for attributes.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
                             "attr2 is always enabled for V5 filesystems.");
                return -EINVAL;
        }

        /*
         * mkfs'ed attr2 will turn on attr2 mount unless explicitly
         * told by noattr2 to turn it off
         */
        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            !(mp->m_flags & XFS_MOUNT_NOATTR2))
                mp->m_flags |= XFS_MOUNT_ATTR2;

        /*
         * prohibit r/w mounts of read-only filesystems
         */
        if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
                xfs_warn(mp,
                        "cannot mount a read-only filesystem as read-write");
                return -EROFS;
        }

        if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
            (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
            !xfs_sb_version_has_pquotino(&mp->m_sb)) {
                xfs_warn(mp,
                  "Super block does not support project and group quota together");
                return -EINVAL;
        }

        return 0;
}

static int
xfs_init_percpu_counters(
        struct xfs_mount        *mp)
{
        int             error;

        error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
        if (error)
                return -ENOMEM;

        error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
        if (error)
                goto free_icount;

        error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
        if (error)
                goto free_ifree;

        error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
        if (error)
                goto free_fdblocks;

        return 0;

free_fdblocks:
        percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
        percpu_counter_destroy(&mp->m_ifree);
free_icount:
        percpu_counter_destroy(&mp->m_icount);
        return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
        struct xfs_mount        *mp)
{
        percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
        percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
        percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
        struct xfs_mount        *mp)
{
        percpu_counter_destroy(&mp->m_icount);
        percpu_counter_destroy(&mp->m_ifree);
        percpu_counter_destroy(&mp->m_fdblocks);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               percpu_counter_sum(&mp->m_delalloc_blks) == 0);
        percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        /* if ->fill_super failed, we have no mount to tear down */
        if (!sb->s_fs_info)
                return;

        xfs_notice(mp, "Unmounting Filesystem");
        xfs_filestream_unmount(mp);
        xfs_unmountfs(mp);

        xfs_freesb(mp);
        free_percpu(mp->m_stats.xs_stats);
        xfs_destroy_percpu_counters(mp);
        xfs_destroy_mount_workqueues(mp);
        xfs_close_devices(mp);

        sb->s_fs_info = NULL;
        xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
        struct super_block      *sb,
        struct shrink_control   *sc)
{
        /* Paranoia: catch incorrect calls during mount setup or teardown */
        if (WARN_ON_ONCE(!sb->s_fs_info))
                return 0;
        return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
        struct super_block      *sb,
        struct shrink_control   *sc)
{
        return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .dirty_inode            = xfs_fs_dirty_inode,
        .drop_inode             = xfs_fs_drop_inode,
        .put_super              = xfs_fs_put_super,
        .sync_fs                = xfs_fs_sync_fs,
        .freeze_fs              = xfs_fs_freeze,
        .unfreeze_fs            = xfs_fs_unfreeze,
        .statfs                 = xfs_fs_statfs,
        .show_options           = xfs_fs_show_options,
        .nr_cached_objects      = xfs_fs_nr_cached_objects,
        .free_cached_objects    = xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
        const char      *s,
        unsigned int    base,
        int             *res)
{
        int             last, shift_left_factor = 0, _res;
        char            *value;
        int             ret = 0;

        value = kstrdup(s, GFP_KERNEL);
        if (!value)
                return -ENOMEM;

        last = strlen(value) - 1;
        if (value[last] == 'K' || value[last] == 'k') {
                shift_left_factor = 10;
                value[last] = '\0';
        }
        if (value[last] == 'M' || value[last] == 'm') {
                shift_left_factor = 20;
                value[last] = '\0';
        }
        if (value[last] == 'G' || value[last] == 'g') {
                shift_left_factor = 30;
                value[last] = '\0';
        }

        if (kstrtoint(value, base, &_res))
                ret = -EINVAL;
        kfree(value);
        *res = _res << shift_left_factor;
        return ret;
}

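/*
 * Illustrative only: suffix_kstrtoint("64k", 10, &v) strips the 'k',
 * parses "64" in base 10, and stores v = 64 << 10 = 65536; a plain
 * "65536" yields the same value with no shift applied.
 */
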
/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fc_parse_param(
        struct fs_context       *fc,
        struct fs_parameter     *param)
{
        struct xfs_mount        *mp = fc->s_fs_info;
        struct fs_parse_result  result;
        int                     size = 0;
        int                     opt;

        opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_logbufs:
                mp->m_logbufs = result.uint_32;
                return 0;
        case Opt_logbsize:
                if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
                        return -EINVAL;
                return 0;
        case Opt_logdev:
                kfree(mp->m_logname);
                mp->m_logname = kstrdup(param->string, GFP_KERNEL);
                if (!mp->m_logname)
                        return -ENOMEM;
                return 0;
        case Opt_rtdev:
                kfree(mp->m_rtname);
                mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
                if (!mp->m_rtname)
                        return -ENOMEM;
                return 0;
        case Opt_allocsize:
                if (suffix_kstrtoint(param->string, 10, &size))
                        return -EINVAL;
                mp->m_allocsize_log = ffs(size) - 1;
                mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
                return 0;
        case Opt_grpid:
        case Opt_bsdgroups:
                mp->m_flags |= XFS_MOUNT_GRPID;
                return 0;
        case Opt_nogrpid:
        case Opt_sysvgroups:
                mp->m_flags &= ~XFS_MOUNT_GRPID;
                return 0;
        case Opt_wsync:
                mp->m_flags |= XFS_MOUNT_WSYNC;
                return 0;
        case Opt_norecovery:
                mp->m_flags |= XFS_MOUNT_NORECOVERY;
                return 0;
        case Opt_noalign:
                mp->m_flags |= XFS_MOUNT_NOALIGN;
                return 0;
        case Opt_swalloc:
                mp->m_flags |= XFS_MOUNT_SWALLOC;
                return 0;
        case Opt_sunit:
                mp->m_dalign = result.uint_32;
                return 0;
        case Opt_swidth:
                mp->m_swidth = result.uint_32;
                return 0;
        case Opt_inode32:
                mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
                return 0;
        case Opt_inode64:
                mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
                return 0;
        case Opt_nouuid:
                mp->m_flags |= XFS_MOUNT_NOUUID;
                return 0;
        case Opt_ikeep:
                mp->m_flags |= XFS_MOUNT_IKEEP;
                return 0;
        case Opt_noikeep:
                mp->m_flags &= ~XFS_MOUNT_IKEEP;
                return 0;
        case Opt_largeio:
                mp->m_flags |= XFS_MOUNT_LARGEIO;
                return 0;
        case Opt_nolargeio:
                mp->m_flags &= ~XFS_MOUNT_LARGEIO;
                return 0;
        case Opt_attr2:
                mp->m_flags |= XFS_MOUNT_ATTR2;
                return 0;
        case Opt_noattr2:
                mp->m_flags &= ~XFS_MOUNT_ATTR2;
                mp->m_flags |= XFS_MOUNT_NOATTR2;
                return 0;
        case Opt_filestreams:
                mp->m_flags |= XFS_MOUNT_FILESTREAMS;
                return 0;
        case Opt_noquota:
                mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
                mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
                mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
                return 0;
        case Opt_quota:
        case Opt_uquota:
        case Opt_usrquota:
                mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
                                 XFS_UQUOTA_ENFD);
                return 0;
        case Opt_qnoenforce:
        case Opt_uqnoenforce:
                mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_UQUOTA_ENFD;
                return 0;
        case Opt_pquota:
        case Opt_prjquota:
                mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
                                 XFS_PQUOTA_ENFD);
                return 0;
        case Opt_pqnoenforce:
                mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_PQUOTA_ENFD;
                return 0;
        case Opt_gquota:
        case Opt_grpquota:
                mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
                                 XFS_GQUOTA_ENFD);
                return 0;
        case Opt_gqnoenforce:
                mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_GQUOTA_ENFD;
                return 0;
        case Opt_discard:
                mp->m_flags |= XFS_MOUNT_DISCARD;
                return 0;
        case Opt_nodiscard:
                mp->m_flags &= ~XFS_MOUNT_DISCARD;
                return 0;
#ifdef CONFIG_FS_DAX
        case Opt_dax:
                mp->m_flags |= XFS_MOUNT_DAX;
                return 0;
#endif
        default:
                xfs_warn(mp, "unknown mount option [%s].", param->key);
                return -EINVAL;
        }

        return 0;
}

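/*
 * Example (the option string is hypothetical): "-o allocsize=64k"
 * reaches Opt_allocsize above, suffix_kstrtoint() turns "64k" into
 * 65536, and ffs(65536) - 1 stores an allocsize log of 16 in
 * m_allocsize_log, which xfs_fs_show_options() later prints back as
 * ",allocsize=64k".
 */
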
static int
xfs_fc_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * The norecovery option requires a read-only mount.
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

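/*
 * The second half of mount processing: called back from get_tree_bdev()
 * with a freshly allocated superblock.  Resources are acquired in order
 * and unwound in reverse by the error labels at the bottom; once
 * xfs_mountfs() has succeeded, failures instead unwind through a full
 * unmount.
 */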
static int
xfs_fc_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fc_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * We must configure the block size in the superblock before we run the
	 * full mount process, as the mount process can look up and cache
	 * inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	sb->s_time_min = S32_MIN;
	sb->s_time_max = S32_MAX;
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
				"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

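/*
 * The fs_context ->get_tree hook: hand off to the generic block device
 * helper, which acquires the bdev named in the context and calls back
 * into xfs_fc_fill_super() for the filesystem-specific work.
 */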
static int
xfs_fc_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fc_fill_super);
}

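/*
 * Transition from a read-only to a read-write mount.  Disallowed for
 * norecovery mounts and for superblocks carrying unknown ro-compat
 * feature bits; otherwise write back any pending superblock changes and
 * restart the background work that a read-only mount keeps quiesced.
 */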
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_start_block_reaping(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}

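/*
 * Transition from a read-write to a read-only mount.  Background
 * scanners are stopped first so they cannot race with the final log
 * force, and the reserve block pool is released so the on-disk free
 * space counters are accurate for the duration of the read-only state.
 */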
static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int			error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_stop_block_reaping(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_icache_free_cowblocks(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_quiesce_attr(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fc_reconfigure(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	struct xfs_sb		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	error = xfs_fc_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

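/* The fs_context ->free hook, called when the context is torn down. */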
static void
xfs_fc_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fc_parse_param,
	.get_tree    = xfs_fc_get_tree,
	.reconfigure = xfs_fc_reconfigure,
	.free        = xfs_fc_free,
};

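/*
 * Allocate and initialise the incore mount structure when a new mount
 * context is created.  The defaults set at the bottom (logbufs/logbsize
 * of -1, 64k allocsize) stand unless the mount option parser overrides
 * them.
 */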
static int
xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= &xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");

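/*
 * Create the slab caches ("zones" in historic XFS parlance) for the
 * structures the filesystem allocates frequently.  The error path tears
 * them down again in strict reverse order of creation.
 */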
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_MEM_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}

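/* Tear down everything created by xfs_init_zones(). */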
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}

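/* Create the global workqueues shared by all XFS mounts. */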
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

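/*
 * Module initialisation: set up the caches, workqueues and MRU cache,
 * register the procfs/sysctl/sysfs interfaces, and only then register
 * the filesystem type so no mount can race ahead of the infrastructure
 * it depends on.
 */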
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

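/*
 * Module teardown: unwind init_xfs_fs() in reverse, additionally
 * freeing the uuid table that mounts may have populated.
 */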
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");