// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax,
};

static const struct fs_parameter_spec xfs_param_specs[] = {
	fsparam_u32("logbufs", Opt_logbufs),
	fsparam_string("logbsize", Opt_logbsize),
	fsparam_string("logdev", Opt_logdev),
	fsparam_string("rtdev", Opt_rtdev),
	fsparam_flag("wsync", Opt_wsync),
	fsparam_flag("noalign", Opt_noalign),
	fsparam_flag("swalloc", Opt_swalloc),
	fsparam_u32("sunit", Opt_sunit),
	fsparam_u32("swidth", Opt_swidth),
	fsparam_flag("nouuid", Opt_nouuid),
	fsparam_flag("grpid", Opt_grpid),
	fsparam_flag("nogrpid", Opt_nogrpid),
	fsparam_flag("bsdgroups", Opt_bsdgroups),
	fsparam_flag("sysvgroups", Opt_sysvgroups),
	fsparam_string("allocsize", Opt_allocsize),
	fsparam_flag("norecovery", Opt_norecovery),
	fsparam_flag("inode64", Opt_inode64),
	fsparam_flag("inode32", Opt_inode32),
	fsparam_flag("ikeep", Opt_ikeep),
	fsparam_flag("noikeep", Opt_noikeep),
	fsparam_flag("largeio", Opt_largeio),
	fsparam_flag("nolargeio", Opt_nolargeio),
	fsparam_flag("attr2", Opt_attr2),
	fsparam_flag("noattr2", Opt_noattr2),
	fsparam_flag("filestreams", Opt_filestreams),
	fsparam_flag("quota", Opt_quota),
	fsparam_flag("noquota", Opt_noquota),
	fsparam_flag("usrquota", Opt_usrquota),
	fsparam_flag("grpquota", Opt_grpquota),
	fsparam_flag("prjquota", Opt_prjquota),
	fsparam_flag("uquota", Opt_uquota),
	fsparam_flag("gquota", Opt_gquota),
	fsparam_flag("pquota", Opt_pquota),
	fsparam_flag("uqnoenforce", Opt_uqnoenforce),
	fsparam_flag("gqnoenforce", Opt_gqnoenforce),
	fsparam_flag("pqnoenforce", Opt_pqnoenforce),
	fsparam_flag("qnoenforce", Opt_qnoenforce),
	fsparam_flag("discard", Opt_discard),
	fsparam_flag("nodiscard", Opt_nodiscard),
	fsparam_flag("dax", Opt_dax),
	{}
};

static const struct fs_parameter_description xfs_fs_parameters = {
	.name		= "xfs",
	.specs		= xfs_param_specs,
};
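
/*
 * Illustrative note (editorial, not part of the original source): with the
 * new mount API, an option string such as
 *
 *	mount -o logbufs=8,logbsize=64k,noalign /dev/sda /mnt
 *
 * is split by the VFS into individual "key[=value]" parameters, each handed
 * to ->parse_param() (xfs_fc_parse_param() below) and matched against the
 * table above.  fsparam_u32() entries arrive already converted in
 * result.uint_32, while fsparam_string() entries are passed through as raw
 * strings so XFS can apply its own suffix handling (e.g. "64k").
 */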

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX,		",dax" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		   (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

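/*
 * Worked example (editorial, derived from the code below): on a 64-bit
 * kernel, pagefactor = 1 and bitshift = BITS_PER_LONG - 1 = 63, so the
 * returned limit is 2^63 - 1.  On a 32-bit kernel with 4k pages,
 * pagefactor = PAGE_SIZE = 4096 and bitshift = 32, giving
 * (4096 << 32) - 1 = 2^44 - 1.
 */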
static uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long long...
	 *      page->index << (PAGE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 */

#if BITS_PER_LONG == 32
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_SIZE;
	bitshift = BITS_PER_LONG;
#endif

	return (((uint64_t)pagefactor) << bitshift) - 1;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
Hannes Eder3180e662009-03-04 19:34:10 +0100309STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310xfs_blkdev_get(
311 xfs_mount_t *mp,
312 const char *name,
313 struct block_device **bdevp)
314{
315 int error = 0;
316
Tejun Heod4d77622010-11-13 11:55:18 +0100317 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
318 mp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319 if (IS_ERR(*bdevp)) {
320 error = PTR_ERR(*bdevp);
Eric Sandeen77af5742014-12-24 09:47:27 +1100321 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 }
323
Dave Chinner24513372014-06-25 14:58:08 +1000324 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325}
326
Hannes Eder3180e662009-03-04 19:34:10 +0100327STATIC void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328xfs_blkdev_put(
329 struct block_device *bdev)
330{
331 if (bdev)
Tejun Heoe525fd82010-11-13 11:55:17 +0100332 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333}
334
Christoph Hellwigf538d4d2005-11-02 10:26:59 +1100335void
336xfs_blkdev_issue_flush(
337 xfs_buftarg_t *buftarg)
338{
Shaohua Li7582df52012-04-24 21:23:46 +0800339 blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
Christoph Hellwigf538d4d2005-11-02 10:26:59 +1100340}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
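
/*
 * Editorial note: the error labels above unwind in exactly the reverse
 * order of allocation, so a failure at any alloc_workqueue() call tears
 * down only the workqueues created before it -- the usual kernel goto
 * unwind pattern.
 */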

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flag)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
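
/*
 * Editorial note: these two helpers bracket a freeze/thaw cycle --
 * xfs_fs_freeze() below calls xfs_save_resvblks() to drop the reserved
 * block pool to zero before quiescing, and xfs_fs_unfreeze() calls
 * xfs_restore_resvblks() to re-establish the saved (or default)
 * reservation afterwards.
 */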

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_stop_block_reaping(mp);
	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_start_block_reaping(mp);
	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
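
/*
 * Editorial example: suffix_kstrtoint("64k", 10, &v) strips the trailing
 * 'k', parses "64" in base 10 and returns v = 64 << 10 = 65536; a plain
 * "65536" yields the same value with no shift.  Only a single trailing
 * K/M/G suffix is honoured.
 */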

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fc_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(mp->m_logname);
		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(mp->m_rtname);
		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		mp->m_allocsize_log = ffs(size) - 1;
		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_ikeep:
		mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_largeio:
		mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_attr2:
		mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		mp->m_flags &= ~XFS_MOUNT_ATTR2;
		mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	case Opt_filestreams:
		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
				 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
				 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
				 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		mp->m_flags |= XFS_MOUNT_DAX;
		return 0;
#endif
	default:
		xfs_warn(mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}
1288
1289static int
1290xfs_fc_validate_params(
1291 struct xfs_mount *mp)
1292{
1293 /*
1294 * no recovery flag requires a read-only mount
1295 */
1296 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1297 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1298 xfs_warn(mp, "no-recovery mounts must be read-only.");
1299 return -EINVAL;
1300 }
1301
1302 if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1303 (mp->m_dalign || mp->m_swidth)) {
1304 xfs_warn(mp,
1305 "sunit and swidth options incompatible with the noalign option");
1306 return -EINVAL;
1307 }
1308
1309 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1310 xfs_warn(mp, "quota support not available in this kernel.");
1311 return -EINVAL;
1312 }
1313
1314 if ((mp->m_dalign && !mp->m_swidth) ||
1315 (!mp->m_dalign && mp->m_swidth)) {
1316 xfs_warn(mp, "sunit and swidth must be specified together");
1317 return -EINVAL;
1318 }
1319
1320 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1321 xfs_warn(mp,
1322 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1323 mp->m_swidth, mp->m_dalign);
1324 return -EINVAL;
1325 }
1326
1327 if (mp->m_logbufs != -1 &&
1328 mp->m_logbufs != 0 &&
1329 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1330 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1331 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1332 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1333 return -EINVAL;
1334 }
1335
1336 if (mp->m_logbsize != -1 &&
1337 mp->m_logbsize != 0 &&
1338 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1339 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1340 !is_power_of_2(mp->m_logbsize))) {
1341 xfs_warn(mp,
1342	"invalid logbufsize: %d [not 16k, 32k, 64k, 128k or 256k]",
1343 mp->m_logbsize);
1344 return -EINVAL;
1345 }
1346
1347 if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1348 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1349 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1350 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1351 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1352 return -EINVAL;
1353 }
1354
1355 return 0;
1356}
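/*
 * Example of the stripe geometry rules enforced above, for illustration
 * only: "-o sunit=64,swidth=256" passes (both are set and 256 % 64 == 0),
 * while "-o sunit=64" alone or "-o sunit=64,swidth=100" fails validation
 * with the corresponding warning.
 */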
1357
1358static int
Ian Kent73e5fff2019-11-04 13:58:46 -08001359xfs_fc_fill_super(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 struct super_block *sb,
Ian Kent73e5fff2019-11-04 13:58:46 -08001361 struct fs_context *fc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362{
Ian Kent73e5fff2019-11-04 13:58:46 -08001363 struct xfs_mount *mp = sb->s_fs_info;
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001364 struct inode *root;
Colin Ian King0279c712019-11-06 08:07:46 -08001365 int flags = 0, error;
Christoph Hellwigbdd907b2008-05-20 15:10:44 +10001366
Ian Kent7c89fcb2019-11-04 13:58:46 -08001367 mp->m_super = sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368
Ian Kent73e5fff2019-11-04 13:58:46 -08001369 error = xfs_fc_validate_params(mp);
Christoph Hellwig745f6912007-08-30 17:20:39 +10001370 if (error)
Ian Kente1d3d212019-11-04 13:58:40 -08001371 goto out_free_names;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
1373 sb_min_blocksize(sb, BBSIZE);
Lachlan McIlroy0ec58512008-06-23 13:23:01 +10001374 sb->s_xattr = xfs_xattr_handlers;
Nathan Scotta50cd262006-03-14 14:06:18 +11001375 sb->s_export_op = &xfs_export_operations;
Christoph Hellwigfcafb712009-02-09 08:47:34 +01001376#ifdef CONFIG_XFS_QUOTA
Nathan Scotta50cd262006-03-14 14:06:18 +11001377 sb->s_qcop = &xfs_quotactl_operations;
Jan Kara17ef4fd2014-09-30 22:35:33 +02001378 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
Christoph Hellwigfcafb712009-02-09 08:47:34 +01001379#endif
Nathan Scotta50cd262006-03-14 14:06:18 +11001380 sb->s_op = &xfs_super_operations;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Dave Chinnerdae5cd82018-05-10 21:50:23 -07001382 /*
1383 * Delay mount work if the debug hook is set. This is debug
1384	 * instrumentation to coordinate simulation of xfs mount failures with
1385	 * VFS superblock operations.
1386 */
1387 if (xfs_globals.mount_delay) {
1388 xfs_notice(mp, "Delaying mount for %d seconds.",
1389 xfs_globals.mount_delay);
1390 msleep(xfs_globals.mount_delay * 1000);
1391 }
1392
Ian Kent73e5fff2019-11-04 13:58:46 -08001393 if (fc->sb_flags & SB_SILENT)
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001394 flags |= XFS_MFSI_QUIET;
1395
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001396 error = xfs_open_devices(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001397 if (error)
Ian Kente1d3d212019-11-04 13:58:40 -08001398 goto out_free_names;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001399
Dave Chinner24513372014-06-25 14:58:08 +10001400 error = xfs_init_mount_workqueues(mp);
Christoph Hellwig61ba35d2010-09-30 02:25:54 +00001401 if (error)
1402 goto out_close_devices;
Christoph Hellwigc962fb72008-05-20 15:10:52 +10001403
Dave Chinner5681ca42015-02-23 21:22:31 +11001404 error = xfs_init_percpu_counters(mp);
Christoph Hellwigaa6bf012012-02-29 09:53:48 +00001405 if (error)
1406 goto out_destroy_workqueues;
1407
Bill O'Donnell225e4632015-10-12 18:21:19 +11001408 /* Allocate stats memory before we do operations that might use it */
1409 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1410 if (!mp->m_stats.xs_stats) {
Dan Carpenterf9d460b2015-10-19 08:42:47 +11001411 error = -ENOMEM;
Bill O'Donnell225e4632015-10-12 18:21:19 +11001412 goto out_destroy_counters;
1413 }
1414
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001415 error = xfs_readsb(mp, flags);
1416 if (error)
Bill O'Donnell225e4632015-10-12 18:21:19 +11001417 goto out_free_stats;
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001418
1419 error = xfs_finish_flags(mp);
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001420 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001421 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001422
Christoph Hellwige34b5622008-05-20 15:10:36 +10001423 error = xfs_setup_devices(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001424 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001425 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001426
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001427 error = xfs_filestream_mount(mp);
1428 if (error)
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001429 goto out_free_sb;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001430
Dave Chinner704b2902011-03-26 09:14:57 +11001431 /*
1432	 * We must configure the block size in the superblock before we run the
1433	 * full mount process, as the mount process can look up and cache inodes.
Dave Chinner704b2902011-03-26 09:14:57 +11001434 */
Adam Borowskidddde682018-10-18 17:20:19 +11001435 sb->s_magic = XFS_SUPER_MAGIC;
Christoph Hellwig4ca488e2007-10-11 18:09:40 +10001436 sb->s_blocksize = mp->m_sb.sb_blocksize;
1437 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
Al Viro8de52772012-02-06 12:45:27 -05001439 sb->s_max_links = XFS_MAXLINK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 sb->s_time_gran = 1;
Deepa Dinamani22b13962019-07-30 08:22:29 -07001441 sb->s_time_min = S32_MIN;
1442 sb->s_time_max = S32_MAX;
Christoph Hellwigadfb5fb2019-06-28 19:30:22 -07001443 sb->s_iflags |= SB_I_CGROUPWB;
1444
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 set_posix_acl_flag(sb);
1446
Dave Chinnerdc037ad2013-06-27 16:04:59 +10001447 /* version 5 superblocks support inode version counters. */
1448 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
Matthew Garrett357fdad2017-10-18 13:56:26 -07001449 sb->s_flags |= SB_I_VERSION;
Dave Chinnerdc037ad2013-06-27 16:04:59 +10001450
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001451 if (mp->m_flags & XFS_MOUNT_DAX) {
Dave Jiang80660f22018-05-30 13:03:46 -07001452 bool rtdev_is_dax = false, datadev_is_dax;
Darrick J. Wongba23cba2018-05-30 13:03:45 -07001453
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001454 xfs_warn(mp,
Toshi Kani1e937cd2016-05-10 10:23:56 -06001455 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1456
Dave Jiang80660f22018-05-30 13:03:46 -07001457 datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1458 sb->s_blocksize);
Darrick J. Wongba23cba2018-05-30 13:03:45 -07001459 if (mp->m_rtdev_targp)
Dave Jiang80660f22018-05-30 13:03:46 -07001460 rtdev_is_dax = bdev_dax_supported(
1461 mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1462 if (!rtdev_is_dax && !datadev_is_dax) {
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001463 xfs_alert(mp,
Toshi Kani1e937cd2016-05-10 10:23:56 -06001464 "DAX unsupported by block device. Turning off DAX.");
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001465 mp->m_flags &= ~XFS_MOUNT_DAX;
1466 }
Darrick J. Wongb6e03c12018-01-31 14:21:56 -08001467 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
Darrick J. Wonge54b5bf2016-10-03 09:11:52 -07001468 xfs_alert(mp,
Christoph Hellwig1e369b02018-01-08 13:30:08 -08001469 "DAX and reflink cannot be used together!");
Darrick J. Wongb6e03c12018-01-31 14:21:56 -08001470 error = -EINVAL;
1471 goto out_filestream_unmount;
1472 }
Dave Chinnercbe4dab2015-06-04 09:19:18 +10001473 }
1474
Kenjiro Nakayama1e6fa682017-09-18 12:03:56 -07001475 if (mp->m_flags & XFS_MOUNT_DISCARD) {
1476 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1477
1478 if (!blk_queue_discard(q)) {
1479			xfs_warn(mp, "mounting with the \"discard\" option, but "
1480 "the device does not support discard");
1481 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1482 }
1483 }
1484
Christoph Hellwig66ae56a2019-02-18 09:38:49 -08001485 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1486 if (mp->m_sb.sb_rblocks) {
1487 xfs_alert(mp,
Darrick J. Wongc14632d2018-01-31 16:38:18 -08001488 "reflink not compatible with realtime device!");
Christoph Hellwig66ae56a2019-02-18 09:38:49 -08001489 error = -EINVAL;
1490 goto out_filestream_unmount;
1491 }
1492
1493 if (xfs_globals.always_cow) {
1494 xfs_info(mp, "using DEBUG-only always_cow mode.");
1495 mp->m_always_cow = true;
1496 }
Darrick J. Wongc14632d2018-01-31 16:38:18 -08001497 }
1498
Darrick J. Wong76883f72018-01-31 09:47:25 -08001499 if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
Darrick J. Wong1c0607a2016-08-03 12:20:57 +10001500 xfs_alert(mp,
Darrick J. Wong76883f72018-01-31 09:47:25 -08001501 "reverse mapping btree not compatible with realtime device!");
1502 error = -EINVAL;
1503 goto out_filestream_unmount;
Darrick J. Wong738f57c2016-08-26 15:59:19 +10001504 }
Darrick J. Wong1c0607a2016-08-03 12:20:57 +10001505
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001506 error = xfs_mountfs(mp);
Christoph Hellwig2bcf6e92011-07-13 13:43:48 +02001507 if (error)
Dave Chinner7e185302012-10-08 21:56:00 +11001508 goto out_filestream_unmount;
Dave Chinner704b2902011-03-26 09:14:57 +11001509
David Chinner01651642008-08-13 15:45:15 +10001510 root = igrab(VFS_I(mp->m_rootip));
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001511 if (!root) {
Dave Chinner24513372014-06-25 14:58:08 +10001512 error = -ENOENT;
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001513 goto out_unmount;
Christoph Hellwigcbc89dc2008-02-05 12:14:01 +11001514 }
Al Viro48fde702012-01-08 22:15:13 -05001515 sb->s_root = d_make_root(root);
Christoph Hellwigf3dcc132008-03-27 18:00:54 +11001516 if (!sb->s_root) {
Dave Chinner24513372014-06-25 14:58:08 +10001517 error = -ENOMEM;
Dave Chinner8a00ebe2012-04-13 12:10:44 +00001518 goto out_unmount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 }
Christoph Hellwig74394492007-08-30 17:21:22 +10001520
Dave Chinner7e185302012-10-08 21:56:00 +11001521 return 0;
1522
1523 out_filestream_unmount:
Christoph Hellwig120226c2008-05-20 15:11:11 +10001524 xfs_filestream_unmount(mp);
Christoph Hellwigeffa2ed2008-05-20 15:11:05 +10001525 out_free_sb:
1526 xfs_freesb(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001527 out_free_stats:
1528 free_percpu(mp->m_stats.xs_stats);
Christoph Hellwig9d565ff2008-10-30 17:53:24 +11001529 out_destroy_counters:
Dave Chinner5681ca42015-02-23 21:22:31 +11001530 xfs_destroy_percpu_counters(mp);
Bill O'Donnell225e4632015-10-12 18:21:19 +11001531 out_destroy_workqueues:
Christoph Hellwigaa6bf012012-02-29 09:53:48 +00001532 xfs_destroy_mount_workqueues(mp);
Christoph Hellwig61ba35d2010-09-30 02:25:54 +00001533 out_close_devices:
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001534 xfs_close_devices(mp);
Ian Kente1d3d212019-11-04 13:58:40 -08001535 out_free_names:
Dave Chinnerc9fbd7b2018-05-10 21:50:23 -07001536 sb->s_fs_info = NULL;
Ian Kenta943f372019-11-04 13:58:42 -08001537 xfs_mount_free(mp);
Dave Chinner24513372014-06-25 14:58:08 +10001538 return error;
Christoph Hellwigf8f15e42008-05-20 11:30:59 +10001539
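	/*
	 * Error paths taken after xfs_mountfs() succeeds must unwind through
	 * xfs_unmountfs() first; out_unmount therefore sits below the success
	 * return and rejoins the common ladder at out_free_sb.
	 */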
Christoph Hellwig2bcf6e92011-07-13 13:43:48 +02001540 out_unmount:
Christoph Hellwige48ad3162008-05-20 11:30:52 +10001541 xfs_filestream_unmount(mp);
Christoph Hellwig19f354d2008-05-20 11:31:13 +10001542 xfs_unmountfs(mp);
Christoph Hellwig62033002008-08-13 16:50:21 +10001543 goto out_free_sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544}
1545
Ian Kent73e5fff2019-11-04 13:58:46 -08001546static int
1547xfs_fc_get_tree(
1548 struct fs_context *fc)
1549{
1550 return get_tree_bdev(fc, xfs_fc_fill_super);
1551}
1552
Ian Kent63cd1e92019-11-04 13:58:47 -08001553static int
1554xfs_remount_rw(
1555 struct xfs_mount *mp)
1556{
1557 struct xfs_sb *sbp = &mp->m_sb;
1558 int error;
1559
1560 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1561 xfs_warn(mp,
1562 "ro->rw transition prohibited on norecovery mount");
1563 return -EINVAL;
1564 }
1565
1566 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1567 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1568 xfs_warn(mp,
1569 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1570 (sbp->sb_features_ro_compat &
1571 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1572 return -EINVAL;
1573 }
1574
1575 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1576
1577 /*
1578	 * If this is the first remount to writeable state, we might have some
1579 * superblock changes to update.
1580 */
1581 if (mp->m_update_sb) {
1582 error = xfs_sync_sb(mp, false);
1583 if (error) {
1584 xfs_warn(mp, "failed to write sb changes");
1585 return error;
1586 }
1587 mp->m_update_sb = false;
1588 }
1589
1590 /*
1591 * Fill out the reserve pool if it is empty. Use the stashed value if
1592 * it is non-zero, otherwise go with the default.
1593 */
1594 xfs_restore_resvblks(mp);
1595 xfs_log_work_queue(mp);
1596
1597 /* Recover any CoW blocks that never got remapped. */
1598 error = xfs_reflink_recover_cow(mp);
1599 if (error) {
1600 xfs_err(mp,
1601 "Error %d recovering leftover CoW allocations.", error);
Dan Carpenter7f6bcf72019-11-08 08:06:36 -08001602 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
Ian Kent63cd1e92019-11-04 13:58:47 -08001603 return error;
1604 }
1605 xfs_start_block_reaping(mp);
1606
1607	/* Create the per-AG metadata reservation pool. */
1608 error = xfs_fs_reserve_ag_blocks(mp);
1609 if (error && error != -ENOSPC)
1610 return error;
1611
1612 return 0;
1613}
1614
1615static int
1616xfs_remount_ro(
1617 struct xfs_mount *mp)
1618{
1619 int error;
1620
1621 /*
1622 * Cancel background eofb scanning so it cannot race with the final
1623 * log force+buftarg wait and deadlock the remount.
1624 */
1625 xfs_stop_block_reaping(mp);
1626
1627 /* Get rid of any leftover CoW reservations... */
1628 error = xfs_icache_free_cowblocks(mp, NULL);
1629 if (error) {
1630 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1631 return error;
1632 }
1633
1634 /* Free the per-AG metadata reservation pool. */
1635 error = xfs_fs_unreserve_ag_blocks(mp);
1636 if (error) {
1637 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1638 return error;
1639 }
1640
1641 /*
1642 * Before we sync the metadata, we need to free up the reserve block
1643 * pool so that the used block count in the superblock on disk is
1644	 * correct at the end of the remount. Stash the current reserve pool
1645 * size so that if we get remounted rw, we can return it to the same
1646 * size.
1647 */
1648 xfs_save_resvblks(mp);
1649
1650 xfs_quiesce_attr(mp);
1651 mp->m_flags |= XFS_MOUNT_RDONLY;
1652
1653 return 0;
1654}
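/*
 * Note the symmetry between the two remount helpers above: xfs_remount_ro()
 * stashes the reserve pool size via xfs_save_resvblks() before draining the
 * pool, and xfs_remount_rw() refills it from that stash (or the default)
 * via xfs_restore_resvblks().
 */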
1655
1656/*
1657 * Logically we would return an error here to prevent users from believing
1658 * they changed mount options via remount that can't actually be changed.
1659 *
1660 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1661 * arguments in some cases, so we can't blindly reject options; we have to
1662 * check each specified option against the currently set value and reject
1663 * it only if the two actually differ.
1664 *
1665 * Until that is implemented, we return success for every remount request, and
1666 * silently ignore all options that we can't actually change.
1667 */
1668static int
1669xfs_fc_reconfigure(
1670 struct fs_context *fc)
1671{
1672 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1673 struct xfs_mount *new_mp = fc->s_fs_info;
1674	struct xfs_sb		*sbp = &mp->m_sb;
1675 int flags = fc->sb_flags;
1676 int error;
1677
1678 error = xfs_fc_validate_params(new_mp);
1679 if (error)
1680 return error;
1681
1682 sync_filesystem(mp->m_super);
1683
1684 /* inode32 -> inode64 */
1685 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1686 !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1687 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1688 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1689 }
1690
1691 /* inode64 -> inode32 */
1692 if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1693 (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1694 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1695 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1696 }
1697
1698 /* ro -> rw */
1699 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1700 error = xfs_remount_rw(mp);
1701 if (error)
1702 return error;
1703 }
1704
1705 /* rw -> ro */
1706 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1707 error = xfs_remount_ro(mp);
1708 if (error)
1709 return error;
1710 }
1711
1712 return 0;
1713}
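/*
 * For illustration: "mount -o remount,ro /mnt" arrives here with SB_RDONLY
 * set in fc->sb_flags and takes the rw -> ro branch above, while a plain
 * "mount -o remount /mnt" of an already-writable filesystem matches neither
 * branch and returns 0, per the "silently ignore" policy described above.
 */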
1714
Ian Kent73e5fff2019-11-04 13:58:46 -08001715static void xfs_fc_free(
1716 struct fs_context *fc)
1717{
1718 struct xfs_mount *mp = fc->s_fs_info;
1719
1720 /*
1721 * mp is stored in the fs_context when it is initialized.
1722 * mp is transferred to the superblock on a successful mount,
1723 * but if an error occurs before the transfer we have to free
1724 * it here.
1725 */
1726 if (mp)
1727 xfs_mount_free(mp);
1728}
1729
1730static const struct fs_context_operations xfs_context_ops = {
1731 .parse_param = xfs_fc_parse_param,
1732 .get_tree = xfs_fc_get_tree,
1733 .reconfigure = xfs_fc_reconfigure,
1734 .free = xfs_fc_free,
1735};
1736
1737static int xfs_init_fs_context(
1738 struct fs_context *fc)
1739{
1740 struct xfs_mount *mp;
1741
Ian Kent50f83002019-11-04 13:58:48 -08001742 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
Ian Kent73e5fff2019-11-04 13:58:46 -08001743 if (!mp)
1744 return -ENOMEM;
1745
Ian Kent50f83002019-11-04 13:58:48 -08001746 spin_lock_init(&mp->m_sb_lock);
1747 spin_lock_init(&mp->m_agirotor_lock);
1748 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1749 spin_lock_init(&mp->m_perag_lock);
1750 mutex_init(&mp->m_growlock);
1751 atomic_set(&mp->m_active_trans, 0);
1752 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1753 INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1754 INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1755 mp->m_kobj.kobject.kset = xfs_kset;
1756 /*
1757 * We don't create the finobt per-ag space reservation until after log
1758 * recovery, so we must set this to true so that an ifree transaction
1759 * started during log recovery will not depend on space reservations
1760 * for finobt expansion.
1761 */
1762 mp->m_finobt_nores = true;
1763
Ian Kent73e5fff2019-11-04 13:58:46 -08001764 /*
1765 * These can be overridden by the mount option parsing.
1766 */
1767 mp->m_logbufs = -1;
1768 mp->m_logbsize = -1;
1769 mp->m_allocsize_log = 16; /* 64k */
1770
1771 /*
1772 * Copy binary VFS mount flags we are interested in.
1773 */
1774 if (fc->sb_flags & SB_RDONLY)
1775 mp->m_flags |= XFS_MOUNT_RDONLY;
1776 if (fc->sb_flags & SB_DIRSYNC)
1777 mp->m_flags |= XFS_MOUNT_DIRSYNC;
1778 if (fc->sb_flags & SB_SYNCHRONOUS)
1779 mp->m_flags |= XFS_MOUNT_WSYNC;
1780
1781 fc->s_fs_info = mp;
1782 fc->ops = &xfs_context_ops;
1783
1784 return 0;
1785}
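/*
 * Rough sketch of how userspace reaches this code through the new mount
 * API (illustrative only; error handling omitted and the device path is
 * made up):
 *
 *	fd = fsopen("xfs", FSOPEN_CLOEXEC);	creates the fs_context and
 *						calls xfs_init_fs_context()
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fd, FSCONFIG_SET_STRING, "logbufs", "8", 0);
 *						-> xfs_fc_parse_param()
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *						-> xfs_fc_get_tree()
 *	mfd = fsmount(fd, FSMOUNT_CLOEXEC, 0);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */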
1786
Andrew Morton5085b602007-02-20 13:57:47 -08001787static struct file_system_type xfs_fs_type = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 .owner = THIS_MODULE,
1789 .name = "xfs",
Ian Kent73e5fff2019-11-04 13:58:46 -08001790 .init_fs_context = xfs_init_fs_context,
1791 .parameters = &xfs_fs_parameters,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 .kill_sb = kill_block_super,
1793 .fs_flags = FS_REQUIRES_DEV,
1794};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08001795MODULE_ALIAS_FS("xfs");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001797STATIC int __init
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001798xfs_init_zones(void)
1799{
Carlos Maiolinob1231762019-11-14 12:43:03 -08001800 xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1801 sizeof(struct xlog_ticket),
1802 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001803 if (!xfs_log_ticket_zone)
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001804 goto out;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001805
Carlos Maiolinob1231762019-11-14 12:43:03 -08001806 xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1807 sizeof(struct xfs_extent_free_item),
1808 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001809 if (!xfs_bmap_free_item_zone)
1810 goto out_destroy_log_ticket_zone;
David Chinnerbf904242008-10-30 17:36:14 +11001811
Carlos Maiolinob1231762019-11-14 12:43:03 -08001812 xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1813 sizeof(struct xfs_btree_cur),
1814 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001815 if (!xfs_btree_cur_zone)
1816 goto out_destroy_bmap_free_item_zone;
1817
Carlos Maiolinob1231762019-11-14 12:43:03 -08001818 xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1819 sizeof(struct xfs_da_state),
1820 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001821 if (!xfs_da_state_zone)
1822 goto out_destroy_btree_cur_zone;
1823
Carlos Maiolinob1231762019-11-14 12:43:03 -08001824 xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1825 sizeof(struct xfs_ifork),
1826 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001827 if (!xfs_ifork_zone)
Dave Chinner1d9025e2012-06-22 18:50:14 +10001828 goto out_destroy_da_state_zone;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001829
Carlos Maiolinob1231762019-11-14 12:43:03 -08001830	xfs_trans_zone = kmem_cache_create("xfs_trans",
1831 sizeof(struct xfs_trans),
1832 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001833 if (!xfs_trans_zone)
1834 goto out_destroy_ifork_zone;
1835
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001837 /*
1838	 * The size of the zone-allocated buf log item is the maximum
1839 * size possible under XFS. This wastes a little bit of memory,
1840 * but it is much faster.
1841 */
Carlos Maiolinob1231762019-11-14 12:43:03 -08001842 xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1843 sizeof(struct xfs_buf_log_item),
1844 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001845 if (!xfs_buf_item_zone)
Dave Chinnere6631f82018-05-09 07:49:37 -07001846 goto out_destroy_trans_zone;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001847
Carlos Maiolinob1231762019-11-14 12:43:03 -08001848 xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1849 (sizeof(struct xfs_efd_log_item) +
1850 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
1851 sizeof(struct xfs_extent)),
1852 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001853 if (!xfs_efd_zone)
1854 goto out_destroy_buf_item_zone;
1855
Carlos Maiolinob1231762019-11-14 12:43:03 -08001856 xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1857 (sizeof(struct xfs_efi_log_item) +
1858 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1859 sizeof(struct xfs_extent)),
1860 0, 0, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001861 if (!xfs_efi_zone)
1862 goto out_destroy_efd_zone;
1863
Carlos Maiolinob1231762019-11-14 12:43:03 -08001864 xfs_inode_zone = kmem_cache_create("xfs_inode",
1865 sizeof(struct xfs_inode), 0,
1866 (SLAB_HWCACHE_ALIGN |
1867 SLAB_RECLAIM_ACCOUNT |
1868 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1869 xfs_fs_inode_init_once);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001870 if (!xfs_inode_zone)
1871 goto out_destroy_efi_zone;
1872
Carlos Maiolinob1231762019-11-14 12:43:03 -08001873 xfs_ili_zone = kmem_cache_create("xfs_ili",
1874 sizeof(struct xfs_inode_log_item), 0,
1875 SLAB_MEM_SPREAD, NULL);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001876 if (!xfs_ili_zone)
1877 goto out_destroy_inode_zone;
Carlos Maiolinob1231762019-11-14 12:43:03 -08001878
1879 xfs_icreate_zone = kmem_cache_create("xfs_icr",
1880 sizeof(struct xfs_icreate_item),
1881 0, 0, NULL);
Dave Chinner3ebe7d22013-06-27 16:04:53 +10001882 if (!xfs_icreate_zone)
1883 goto out_destroy_ili_zone;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001884
Carlos Maiolinob1231762019-11-14 12:43:03 -08001885 xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1886 sizeof(struct xfs_rud_log_item),
1887 0, 0, NULL);
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10001888 if (!xfs_rud_zone)
1889 goto out_destroy_icreate_zone;
1890
Carlos Maiolinob1231762019-11-14 12:43:03 -08001891 xfs_rui_zone = kmem_cache_create("xfs_rui_item",
Darrick J. Wongcd001582016-09-19 10:24:27 +10001892 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08001893 0, 0, NULL);
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10001894 if (!xfs_rui_zone)
1895 goto out_destroy_rud_zone;
1896
Carlos Maiolinob1231762019-11-14 12:43:03 -08001897 xfs_cud_zone = kmem_cache_create("xfs_cud_item",
1898 sizeof(struct xfs_cud_log_item),
1899 0, 0, NULL);
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07001900 if (!xfs_cud_zone)
1901 goto out_destroy_rui_zone;
1902
Carlos Maiolinob1231762019-11-14 12:43:03 -08001903 xfs_cui_zone = kmem_cache_create("xfs_cui_item",
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07001904 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08001905 0, 0, NULL);
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07001906 if (!xfs_cui_zone)
1907 goto out_destroy_cud_zone;
1908
Carlos Maiolinob1231762019-11-14 12:43:03 -08001909 xfs_bud_zone = kmem_cache_create("xfs_bud_item",
1910 sizeof(struct xfs_bud_log_item),
1911 0, 0, NULL);
Darrick J. Wong6413a012016-10-03 09:11:25 -07001912 if (!xfs_bud_zone)
1913 goto out_destroy_cui_zone;
1914
Carlos Maiolinob1231762019-11-14 12:43:03 -08001915 xfs_bui_zone = kmem_cache_create("xfs_bui_item",
Darrick J. Wong6413a012016-10-03 09:11:25 -07001916 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
Carlos Maiolinob1231762019-11-14 12:43:03 -08001917 0, 0, NULL);
Darrick J. Wong6413a012016-10-03 09:11:25 -07001918 if (!xfs_bui_zone)
1919 goto out_destroy_bud_zone;
1920
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001921 return 0;
1922
Darrick J. Wong6413a012016-10-03 09:11:25 -07001923 out_destroy_bud_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001924 kmem_cache_destroy(xfs_bud_zone);
Darrick J. Wong6413a012016-10-03 09:11:25 -07001925 out_destroy_cui_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001926 kmem_cache_destroy(xfs_cui_zone);
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07001927 out_destroy_cud_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001928 kmem_cache_destroy(xfs_cud_zone);
Darrick J. Wongbaf4bcac2016-10-03 09:11:20 -07001929 out_destroy_rui_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001930 kmem_cache_destroy(xfs_rui_zone);
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10001931 out_destroy_rud_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001932 kmem_cache_destroy(xfs_rud_zone);
Darrick J. Wong5880f2d72016-08-03 12:04:45 +10001933 out_destroy_icreate_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001934 kmem_cache_destroy(xfs_icreate_zone);
Dave Chinner3ebe7d22013-06-27 16:04:53 +10001935 out_destroy_ili_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001936 kmem_cache_destroy(xfs_ili_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001937 out_destroy_inode_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001938 kmem_cache_destroy(xfs_inode_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001939 out_destroy_efi_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001940 kmem_cache_destroy(xfs_efi_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001941 out_destroy_efd_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001942 kmem_cache_destroy(xfs_efd_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001943 out_destroy_buf_item_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001944 kmem_cache_destroy(xfs_buf_item_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001945 out_destroy_trans_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001946 kmem_cache_destroy(xfs_trans_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001947 out_destroy_ifork_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001948 kmem_cache_destroy(xfs_ifork_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001949 out_destroy_da_state_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001950 kmem_cache_destroy(xfs_da_state_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001951 out_destroy_btree_cur_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001952 kmem_cache_destroy(xfs_btree_cur_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001953 out_destroy_bmap_free_item_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001954 kmem_cache_destroy(xfs_bmap_free_item_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001955 out_destroy_log_ticket_zone:
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001956 kmem_cache_destroy(xfs_log_ticket_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001957 out:
1958 return -ENOMEM;
1959}
1960
1961STATIC void
1962xfs_destroy_zones(void)
1963{
Kirill A. Shutemov8c0a8532012-09-26 11:33:07 +10001964 /*
1965	 * Make sure all delayed RCU frees are flushed before we
1966 * destroy caches.
1967 */
1968 rcu_barrier();
Carlos Maiolinoaaf54eb2019-11-14 12:43:04 -08001969 kmem_cache_destroy(xfs_bui_zone);
1970 kmem_cache_destroy(xfs_bud_zone);
1971 kmem_cache_destroy(xfs_cui_zone);
1972 kmem_cache_destroy(xfs_cud_zone);
1973 kmem_cache_destroy(xfs_rui_zone);
1974 kmem_cache_destroy(xfs_rud_zone);
1975 kmem_cache_destroy(xfs_icreate_zone);
1976 kmem_cache_destroy(xfs_ili_zone);
1977 kmem_cache_destroy(xfs_inode_zone);
1978 kmem_cache_destroy(xfs_efi_zone);
1979 kmem_cache_destroy(xfs_efd_zone);
1980 kmem_cache_destroy(xfs_buf_item_zone);
1981 kmem_cache_destroy(xfs_trans_zone);
1982 kmem_cache_destroy(xfs_ifork_zone);
1983 kmem_cache_destroy(xfs_da_state_zone);
1984 kmem_cache_destroy(xfs_btree_cur_zone);
1985 kmem_cache_destroy(xfs_bmap_free_item_zone);
1986 kmem_cache_destroy(xfs_log_ticket_zone);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10001987}
1988
1989STATIC int __init
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10001990xfs_init_workqueues(void)
1991{
1992 /*
Dave Chinnerc999a222012-03-22 05:15:07 +00001993 * The allocation workqueue can be used in memory reclaim situations
1994 * (writepage path), and parallelism is only limited by the number of
1995 * AGs in all the filesystems mounted. Hence use the default large
1996 * max_active value for this workqueue.
1997 */
Brian Foster8018ec02014-09-09 11:44:46 +10001998 xfs_alloc_wq = alloc_workqueue("xfsalloc",
1999 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
Dave Chinnerc999a222012-03-22 05:15:07 +00002000 if (!xfs_alloc_wq)
Dave Chinner58896082012-10-08 21:56:05 +11002001 return -ENOMEM;
Dave Chinnerc999a222012-03-22 05:15:07 +00002002
Christoph Hellwig4560e782017-02-07 14:07:58 -08002003 xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2004 if (!xfs_discard_wq)
2005 goto out_free_alloc_wq;
2006
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002007 return 0;
Christoph Hellwig4560e782017-02-07 14:07:58 -08002008out_free_alloc_wq:
2009 destroy_workqueue(xfs_alloc_wq);
2010 return -ENOMEM;
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002011}
2012
Luck, Tony39411f82011-04-11 12:06:12 -07002013STATIC void
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002014xfs_destroy_workqueues(void)
2015{
Christoph Hellwig4560e782017-02-07 14:07:58 -08002016 destroy_workqueue(xfs_discard_wq);
Dave Chinnerc999a222012-03-22 05:15:07 +00002017 destroy_workqueue(xfs_alloc_wq);
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002018}
2019
2020STATIC int __init
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002021init_xfs_fs(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022{
2023 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
Darrick J. Wong30cbc592016-03-09 08:15:14 +11002025 xfs_check_ondisk_structs();
2026
Christoph Hellwig65795912008-11-28 14:23:33 +11002027 printk(KERN_INFO XFS_VERSION_STRING " with "
2028 XFS_BUILD_OPTIONS " enabled\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002030 xfs_dir_startup();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
Nathan Scott87582802006-03-14 13:18:19 +11002032 error = xfs_init_zones();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002033 if (error)
2034 goto out;
2035
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002036 error = xfs_init_workqueues();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002037 if (error)
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002038 goto out_destroy_zones;
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002039
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002040 error = xfs_mru_cache_init();
2041 if (error)
2042 goto out_destroy_wq;
2043
Nathan Scottce8e9222006-01-11 15:39:08 +11002044 error = xfs_buf_init();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002045 if (error)
Christoph Hellwig1919add2014-04-23 07:11:51 +10002046 goto out_mru_cache_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002048 error = xfs_init_procfs();
2049 if (error)
2050 goto out_buf_terminate;
2051
2052 error = xfs_sysctl_register();
2053 if (error)
2054 goto out_cleanup_procfs;
2055
Brian Foster3d871222014-07-15 07:41:37 +10002056 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2057 if (!xfs_kset) {
2058 error = -ENOMEM;
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002059 goto out_sysctl_unregister;
Brian Foster3d871222014-07-15 07:41:37 +10002060 }
2061
Bill O'Donnell80529c42015-10-12 05:19:45 +11002062 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2063
2064 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2065 if (!xfsstats.xs_stats) {
2066 error = -ENOMEM;
2067 goto out_kset_unregister;
2068 }
2069
2070 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002071 "stats");
2072 if (error)
Bill O'Donnell80529c42015-10-12 05:19:45 +11002073 goto out_free_stats;
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002074
Brian Foster65b65732014-09-09 11:52:42 +10002075#ifdef DEBUG
2076 xfs_dbg_kobj.kobject.kset = xfs_kset;
2077 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002078 if (error)
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002079 goto out_remove_stats_kobj;
Brian Foster65b65732014-09-09 11:52:42 +10002080#endif
2081
2082 error = xfs_qm_init();
2083 if (error)
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002084 goto out_remove_dbg_kobj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
2086 error = register_filesystem(&xfs_fs_type);
2087 if (error)
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002088 goto out_qm_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 return 0;
2090
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002091 out_qm_exit:
2092 xfs_qm_exit();
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002093 out_remove_dbg_kobj:
Brian Foster65b65732014-09-09 11:52:42 +10002094#ifdef DEBUG
2095 xfs_sysfs_del(&xfs_dbg_kobj);
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002096 out_remove_stats_kobj:
Brian Foster65b65732014-09-09 11:52:42 +10002097#endif
Bill O'Donnell80529c42015-10-12 05:19:45 +11002098 xfs_sysfs_del(&xfsstats.xs_kobj);
2099 out_free_stats:
2100 free_percpu(xfsstats.xs_stats);
Bill O'Donnellbb230c12015-10-12 05:15:45 +11002101 out_kset_unregister:
Brian Foster3d871222014-07-15 07:41:37 +10002102 kset_unregister(xfs_kset);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002103 out_sysctl_unregister:
2104 xfs_sysctl_unregister();
2105 out_cleanup_procfs:
2106 xfs_cleanup_procfs();
2107 out_buf_terminate:
Nathan Scottce8e9222006-01-11 15:39:08 +11002108 xfs_buf_terminate();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002109 out_mru_cache_uninit:
2110 xfs_mru_cache_uninit();
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002111 out_destroy_wq:
2112 xfs_destroy_workqueues();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002113 out_destroy_zones:
Nathan Scott87582802006-03-14 13:18:19 +11002114 xfs_destroy_zones();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002115 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 return error;
2117}
2118
2119STATIC void __exit
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002120exit_xfs_fs(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121{
Christoph Hellwiga05931c2012-03-13 08:52:37 +00002122 xfs_qm_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 unregister_filesystem(&xfs_fs_type);
Brian Foster65b65732014-09-09 11:52:42 +10002124#ifdef DEBUG
2125 xfs_sysfs_del(&xfs_dbg_kobj);
2126#endif
Bill O'Donnell80529c42015-10-12 05:19:45 +11002127 xfs_sysfs_del(&xfsstats.xs_kobj);
2128 free_percpu(xfsstats.xs_stats);
Brian Foster3d871222014-07-15 07:41:37 +10002129 kset_unregister(xfs_kset);
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002130 xfs_sysctl_unregister();
2131 xfs_cleanup_procfs();
Nathan Scottce8e9222006-01-11 15:39:08 +11002132 xfs_buf_terminate();
Christoph Hellwig9f8868f2008-07-18 17:11:46 +10002133 xfs_mru_cache_uninit();
Dave Chinner0bf6a5b2011-04-08 12:45:07 +10002134 xfs_destroy_workqueues();
Nathan Scott87582802006-03-14 13:18:19 +11002135 xfs_destroy_zones();
Darrick J. Wongaf3b6382015-11-03 13:06:34 +11002136 xfs_uuid_table_free();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137}
2138
2139module_init(init_xfs_fs);
2140module_exit(exit_xfs_fs);
2141
2142MODULE_AUTHOR("Silicon Graphics, Inc.");
2143MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2144MODULE_LICENSE("GPL");