/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_vfsops.h"
#include "xfs_version.h"
#include "xfs_log_priv.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_vnode_zone;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

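/*
 * Allocate and initialise a mount-argument structure, copying in the
 * mount(2) flags the VFS has already parsed for us.
 */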
STATIC struct xfs_mount_args *
xfs_args_allocate(
        struct super_block      *sb,
        int                     silent)
{
        struct xfs_mount_args   *args;

        args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);

        /* Copy the already-parsed mount(2) flags we're interested in */
        if (sb->s_flags & MS_DIRSYNC)
                args->flags |= XFSMNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                args->flags |= XFSMNT_WSYNC;
        if (silent)
                args->flags |= XFSMNT_QUIET;
        args->flags |= XFSMNT_32BITINODES;

        return args;
}

#define MNTOPT_LOGBUFS     "logbufs"     /* number of XFS log buffers */
#define MNTOPT_LOGBSIZE    "logbsize"    /* size of XFS log buffers */
#define MNTOPT_LOGDEV      "logdev"      /* log device */
#define MNTOPT_RTDEV       "rtdev"       /* realtime I/O device */
#define MNTOPT_BIOSIZE     "biosize"     /* log2 of preferred buffered io size */
#define MNTOPT_WSYNC       "wsync"       /* safe-mode nfs compatible mount */
#define MNTOPT_INO64       "ino64"       /* force inodes into 64-bit range */
#define MNTOPT_NOALIGN     "noalign"     /* turn off stripe alignment */
#define MNTOPT_SWALLOC     "swalloc"     /* turn on stripe width allocation */
#define MNTOPT_SUNIT       "sunit"       /* data volume stripe unit */
#define MNTOPT_SWIDTH      "swidth"      /* data volume stripe width */
#define MNTOPT_NOUUID      "nouuid"      /* ignore filesystem UUID */
#define MNTOPT_MTPT        "mtpt"        /* filesystem mount point */
#define MNTOPT_GRPID       "grpid"       /* group-ID from parent directory */
#define MNTOPT_NOGRPID     "nogrpid"     /* group-ID from current process */
#define MNTOPT_BSDGROUPS   "bsdgroups"   /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS  "sysvgroups"  /* group-ID from current process */
#define MNTOPT_ALLOCSIZE   "allocsize"   /* preferred allocation size */
#define MNTOPT_NORECOVERY  "norecovery"  /* don't run XFS recovery */
#define MNTOPT_BARRIER     "barrier"     /* use writer barriers for log write and
                                          * unwritten extent conversion */
#define MNTOPT_NOBARRIER   "nobarrier"   /* .. disable */
#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
#define MNTOPT_64BITINODE  "inode64"     /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP       "ikeep"       /* do not free empty inode clusters */
#define MNTOPT_NOIKEEP     "noikeep"     /* free empty inode clusters */
#define MNTOPT_LARGEIO     "largeio"     /* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO   "nolargeio"   /* do not report large I/O sizes
                                          * in stat(). */
#define MNTOPT_ATTR2       "attr2"       /* do use attr2 attribute format */
#define MNTOPT_NOATTR2     "noattr2"     /* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA       "quota"       /* disk quotas (user) */
#define MNTOPT_NOQUOTA     "noquota"     /* no quotas */
#define MNTOPT_USRQUOTA    "usrquota"    /* user quota enabled */
#define MNTOPT_GRPQUOTA    "grpquota"    /* group quota enabled */
#define MNTOPT_PRJQUOTA    "prjquota"    /* project quota enabled */
#define MNTOPT_UQUOTA      "uquota"      /* user quota (IRIX variant) */
#define MNTOPT_GQUOTA      "gquota"      /* group quota (IRIX variant) */
#define MNTOPT_PQUOTA      "pquota"      /* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce" /* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce" /* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce" /* project quota limit enforcement */
#define MNTOPT_QUOTANOENF  "qnoenforce"  /* same as uqnoenforce */
#define MNTOPT_DMAPI       "dmapi"       /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_XDSM        "xdsm"        /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_DMI         "dmi"         /* DMI enabled (DMAPI / XDSM) */

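/*
 * Parse an unsigned integer that may carry a K/M/G suffix, returning the
 * value shifted up accordingly.  The suffix character is stripped in place.
 */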
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
        int     last, shift_left_factor = 0;
        char    *value = s;

        last = strlen(value) - 1;
        if (value[last] == 'K' || value[last] == 'k') {
                shift_left_factor = 10;
                value[last] = '\0';
        }
        if (value[last] == 'M' || value[last] == 'm') {
                shift_left_factor = 20;
                value[last] = '\0';
        }
        if (value[last] == 'G' || value[last] == 'g') {
                shift_left_factor = 30;
                value[last] = '\0';
        }

        return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}

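/*
 * Parse the comma-separated mount option string into the xfs_mount_args
 * structure, validating option combinations along the way.  Returns 0 on
 * success or a positive EINVAL if an option is malformed or inconsistent.
 */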
STATIC int
xfs_parseargs(
        struct xfs_mount        *mp,
        char                    *options,
        struct xfs_mount_args   *args,
        int                     update)
{
        char                    *this_char, *value, *eov;
        int                     dsunit, dswidth, vol_dsunit, vol_dswidth;
        int                     iosize;
        int                     ikeep = 0;

        args->flags |= XFSMNT_BARRIER;
        args->flags2 |= XFSMNT2_COMPAT_IOSIZE;

        if (!options)
                goto done;

        iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0;

        while ((this_char = strsep(&options, ",")) != NULL) {
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char, '=')) != NULL)
                        *value++ = 0;

                if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        args->logbufs = simple_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        args->logbufsize = suffix_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        strncpy(args->logname, value, MAXNAMELEN);
                } else if (!strcmp(this_char, MNTOPT_MTPT)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        strncpy(args->mtpt, value, MAXNAMELEN);
                } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        strncpy(args->rtname, value, MAXNAMELEN);
                } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        iosize = simple_strtoul(value, &eov, 10);
                        args->flags |= XFSMNT_IOSIZE;
                        args->iosizelog = (uint8_t) iosize;
                } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        iosize = suffix_strtoul(value, &eov, 10);
                        args->flags |= XFSMNT_IOSIZE;
                        args->iosizelog = ffs(iosize) - 1;
                } else if (!strcmp(this_char, MNTOPT_GRPID) ||
                           !strcmp(this_char, MNTOPT_BSDGROUPS)) {
                        mp->m_flags |= XFS_MOUNT_GRPID;
                } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
                           !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
                        mp->m_flags &= ~XFS_MOUNT_GRPID;
                } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
                        args->flags |= XFSMNT_WSYNC;
                } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
                        args->flags |= XFSMNT_OSYNCISOSYNC;
                } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
                        args->flags |= XFSMNT_NORECOVERY;
                } else if (!strcmp(this_char, MNTOPT_INO64)) {
                        args->flags |= XFSMNT_INO64;
#if !XFS_BIG_INUMS
                        cmn_err(CE_WARN,
                                "XFS: %s option not allowed on this system",
                                this_char);
                        return EINVAL;
#endif
                } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
                        args->flags |= XFSMNT_NOALIGN;
                } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
                        args->flags |= XFSMNT_SWALLOC;
                } else if (!strcmp(this_char, MNTOPT_SUNIT)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        dsunit = simple_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
                                        "XFS: %s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        dswidth = simple_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
                        args->flags &= ~XFSMNT_32BITINODES;
#if !XFS_BIG_INUMS
                        cmn_err(CE_WARN,
                                "XFS: %s option not allowed on this system",
                                this_char);
                        return EINVAL;
#endif
                } else if (!strcmp(this_char, MNTOPT_NOUUID)) {
                        args->flags |= XFSMNT_NOUUID;
                } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
                        args->flags |= XFSMNT_BARRIER;
                } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
                        args->flags &= ~XFSMNT_BARRIER;
                } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
                        ikeep = 1;
                        args->flags &= ~XFSMNT_IDELETE;
                } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
                        args->flags |= XFSMNT_IDELETE;
                } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
                        args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE;
                } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
                        args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
                } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
                        args->flags |= XFSMNT_ATTR2;
                } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
                        args->flags &= ~XFSMNT_ATTR2;
                } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
                        args->flags2 |= XFSMNT2_FILESTREAMS;
                } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
                        args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
                        args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
                } else if (!strcmp(this_char, MNTOPT_QUOTA) ||
                           !strcmp(this_char, MNTOPT_UQUOTA) ||
                           !strcmp(this_char, MNTOPT_USRQUOTA)) {
                        args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
                } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
                           !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
                        args->flags |= XFSMNT_UQUOTA;
                        args->flags &= ~XFSMNT_UQUOTAENF;
                } else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
                           !strcmp(this_char, MNTOPT_PRJQUOTA)) {
                        args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF;
                } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
                        args->flags |= XFSMNT_PQUOTA;
                        args->flags &= ~XFSMNT_PQUOTAENF;
                } else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
                           !strcmp(this_char, MNTOPT_GRPQUOTA)) {
                        args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
                } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
                        args->flags |= XFSMNT_GQUOTA;
                        args->flags &= ~XFSMNT_GQUOTAENF;
                } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
                        args->flags |= XFSMNT_DMAPI;
                } else if (!strcmp(this_char, MNTOPT_XDSM)) {
                        args->flags |= XFSMNT_DMAPI;
                } else if (!strcmp(this_char, MNTOPT_DMI)) {
                        args->flags |= XFSMNT_DMAPI;
                } else if (!strcmp(this_char, "ihashsize")) {
                        cmn_err(CE_WARN,
        "XFS: ihashsize no longer used, option is deprecated.");
                } else if (!strcmp(this_char, "osyncisdsync")) {
                        /* no-op, this is now the default */
                        cmn_err(CE_WARN,
        "XFS: osyncisdsync is now the default, option is deprecated.");
                } else if (!strcmp(this_char, "irixsgid")) {
                        cmn_err(CE_WARN,
        "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
                } else {
                        cmn_err(CE_WARN,
                                "XFS: unknown mount option [%s].", this_char);
                        return EINVAL;
                }
        }

        if (args->flags & XFSMNT_NORECOVERY) {
                if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) {
                        cmn_err(CE_WARN,
                                "XFS: no-recovery mounts must be read-only.");
                        return EINVAL;
                }
        }

        if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) {
                cmn_err(CE_WARN,
        "XFS: sunit and swidth options incompatible with the noalign option");
                return EINVAL;
        }

        if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) {
                cmn_err(CE_WARN,
                        "XFS: cannot mount with both project and group quota");
                return EINVAL;
        }

        if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') {
                printk("XFS: %s option needs the mount point option as well\n",
                        MNTOPT_DMAPI);
                return EINVAL;
        }

        if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
                cmn_err(CE_WARN,
                        "XFS: sunit and swidth must be specified together");
                return EINVAL;
        }

        if (dsunit && (dswidth % dsunit != 0)) {
                cmn_err(CE_WARN,
        "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
                        dswidth, dsunit);
                return EINVAL;
        }

        /*
         * Applications using DMI filesystems often expect the
         * inode generation number to be monotonically increasing.
         * If we delete inode chunks we break this assumption, so
         * keep unused inode chunks on disk for DMI filesystems
         * until we come up with a better solution.
         * Note that if "ikeep" or "noikeep" mount options are
         * supplied, then they are honored.
         */
        if (!(args->flags & XFSMNT_DMAPI) && !ikeep)
                args->flags |= XFSMNT_IDELETE;

        if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
                if (dsunit) {
                        args->sunit = dsunit;
                        args->flags |= XFSMNT_RETERR;
                } else {
                        args->sunit = vol_dsunit;
                }
                dswidth ? (args->swidth = dswidth) :
                          (args->swidth = vol_dswidth);
        } else {
                args->sunit = args->swidth = 0;
        }

done:
        if (args->flags & XFSMNT_32BITINODES)
                mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
        if (args->flags2)
                args->flags |= XFSMNT_FLAGS2;
        return 0;
}

struct proc_xfs_info {
        int     flag;
        char    *str;
};

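/*
 * Emit the mount options that differ from the defaults onto the given
 * seq_file; called from the show_options super operation below.
 */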
STATIC int
xfs_showargs(
        struct xfs_mount        *mp,
        struct seq_file         *m)
{
        static struct proc_xfs_info xfs_info_set[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_WSYNC,              "," MNTOPT_WSYNC },
                { XFS_MOUNT_INO64,              "," MNTOPT_INO64 },
                { XFS_MOUNT_NOALIGN,            "," MNTOPT_NOALIGN },
                { XFS_MOUNT_SWALLOC,            "," MNTOPT_SWALLOC },
                { XFS_MOUNT_NOUUID,             "," MNTOPT_NOUUID },
                { XFS_MOUNT_NORECOVERY,         "," MNTOPT_NORECOVERY },
                { XFS_MOUNT_OSYNCISOSYNC,       "," MNTOPT_OSYNCISOSYNC },
                { XFS_MOUNT_ATTR2,              "," MNTOPT_ATTR2 },
                { XFS_MOUNT_FILESTREAMS,        "," MNTOPT_FILESTREAM },
                { XFS_MOUNT_DMAPI,              "," MNTOPT_DMAPI },
                { XFS_MOUNT_GRPID,              "," MNTOPT_GRPID },
                { 0, NULL }
        };
        static struct proc_xfs_info xfs_info_unset[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_IDELETE,            "," MNTOPT_IKEEP },
                { XFS_MOUNT_COMPAT_IOSIZE,      "," MNTOPT_LARGEIO },
                { XFS_MOUNT_BARRIER,            "," MNTOPT_NOBARRIER },
                { XFS_MOUNT_SMALL_INUMS,        "," MNTOPT_64BITINODE },
                { 0, NULL }
        };
        struct proc_xfs_info    *xfs_infop;

        for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
                if (mp->m_flags & xfs_infop->flag)
                        seq_puts(m, xfs_infop->str);
        }
        for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
                if (!(mp->m_flags & xfs_infop->flag))
                        seq_puts(m, xfs_infop->str);
        }

        if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
                seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
                                (int)(1 << mp->m_writeio_log) >> 10);

        if (mp->m_logbufs > 0)
                seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
        if (mp->m_logbsize > 0)
                seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

        if (mp->m_logname)
                seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
        if (mp->m_rtname)
                seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

        if (mp->m_dalign > 0)
                seq_printf(m, "," MNTOPT_SUNIT "=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
        if (mp->m_swidth > 0)
                seq_printf(m, "," MNTOPT_SWIDTH "=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

        if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
                seq_puts(m, "," MNTOPT_USRQUOTA);
        else if (mp->m_qflags & XFS_UQUOTA_ACCT)
                seq_puts(m, "," MNTOPT_UQUOTANOENF);

        if (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
                seq_puts(m, "," MNTOPT_PRJQUOTA);
        else if (mp->m_qflags & XFS_PQUOTA_ACCT)
                seq_puts(m, "," MNTOPT_PQUOTANOENF);

        if (mp->m_qflags & (XFS_GQUOTA_ACCT|XFS_OQUOTA_ENFD))
                seq_puts(m, "," MNTOPT_GRPQUOTA);
        else if (mp->m_qflags & XFS_GQUOTA_ACCT)
                seq_puts(m, "," MNTOPT_GQUOTANOENF);

        if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
                seq_puts(m, "," MNTOPT_NOQUOTA);

        return 0;
}
__uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_prepare_write does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

        return (((__uint64_t)pagefactor) << bitshift) - 1;
}

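/*
 * Wire up the inode, file and address-space operation vectors appropriate
 * for the type of the newly instantiated inode.
 */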
STATIC_INLINE void
xfs_set_inodeops(
        struct inode            *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_op = &xfs_inode_operations;
                inode->i_fop = &xfs_file_operations;
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
                inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
                inode->i_op = &xfs_symlink_inode_operations;
                if (inode->i_blocks)
                        inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        default:
                inode->i_op = &xfs_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        }
}

STATIC_INLINE void
xfs_revalidate_inode(
        xfs_mount_t             *mp,
        bhv_vnode_t             *vp,
        xfs_inode_t             *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
        inode->i_uid    = ip->i_d.di_uid;
        inode->i_gid    = ip->i_d.di_gid;

        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev =
                        MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
                              sysv_minor(ip->i_df.if_u2.if_rdev));
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
        inode->i_blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
        inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
        inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
        inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
        inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
        inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
        inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
                inode->i_flags |= S_SYNC;
        else
                inode->i_flags &= ~S_SYNC;
        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
                inode->i_flags |= S_NOATIME;
        else
                inode->i_flags &= ~S_NOATIME;
        xfs_iflags_clear(ip, XFS_IMODIFIED);
}

void
xfs_initialize_vnode(
        struct xfs_mount        *mp,
        bhv_vnode_t             *vp,
        struct xfs_inode        *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        if (!ip->i_vnode) {
                ip->i_vnode = vp;
                inode->i_private = ip;
        }

        /*
         * We need to set the ops vectors, and unlock the inode, but if
         * we have been called during the new inode create process, it is
         * too early to fill in the Linux inode.  We will get called a
         * second time once the inode is properly set up, and then we can
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
                xfs_revalidate_inode(mp, vp, ip);
                xfs_set_inodeops(inode);

                xfs_iflags_clear(ip, XFS_INEW);
                barrier();

                unlock_new_inode(inode);
        }
}

int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = open_bdev_excl(name, 0, mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
        xfs_mount_t     *mp)
{
        xfs_buf_t       *sbp = xfs_getsb(mp, 0);
        int             error;

        XFS_BUF_UNDONE(sbp);
        XFS_BUF_UNREAD(sbp);
        XFS_BUF_UNDELAYWRITE(sbp);
        XFS_BUF_WRITE(sbp);
        XFS_BUF_UNASYNC(sbp);
        XFS_BUF_ORDERED(sbp);

        xfsbdstrat(mp, sbp);
        error = xfs_iowait(sbp);

        /*
         * Clear all the flags we set and possible error state in the
         * buffer.  We only did the write to try out whether barriers
         * worked and shouldn't leave any traces in the superblock
         * buffer.
         */
        XFS_BUF_DONE(sbp);
        XFS_BUF_ERROR(sbp, 0);
        XFS_BUF_UNORDERED(sbp);

        xfs_buf_relse(sbp);
        return error;
}

void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
        int error;

        if (mp->m_logdev_targp != mp->m_ddev_targp) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
                                        QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, underlying device is readonly");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        error = xfs_barrier_test(mp);
        if (error) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
}

STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        bhv_vnode_t             *vp;

        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
        return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
        kmem_zone_t             *zonep,
        void                    *vnode)
{
        inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}

STATIC int
xfs_init_zones(void)
{
        xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
                                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                        KM_ZONE_SPREAD,
                                        xfs_fs_inode_init_once);
        if (!xfs_vnode_zone)
                goto out;

        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out_destroy_vnode_zone;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_free_ioend_zone;
        return 0;

 out_free_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
        kmem_zone_destroy(xfs_vnode_zone);
 out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_vnode_zone);
        kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode; this will actually fail if the inode is
 * pinned, but we dirty the inode again at the point when it is unpinned
 * after a log write, since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
        struct inode            *inode,
        int                     sync)
{
        int                     error = 0, flags = FLUSH_INODE;

        xfs_itrace_entry(XFS_I(inode));
        if (sync) {
                filemap_fdatawait(inode->i_mapping);
                flags |= FLUSH_SYNC;
        }
        error = xfs_inode_flush(XFS_I(inode), flags);
        /*
         * if we failed to write out the inode then mark
         * it dirty again so we'll try again later.
         */
        if (error)
                mark_inode_dirty_sync(inode);

        return -error;
}

STATIC void
xfs_fs_clear_inode(
        struct inode            *inode)
{
        xfs_inode_t             *ip = XFS_I(inode);

        /*
         * ip can be null when xfs_iget_core calls xfs_idestroy if we
         * find an inode with di_mode == 0 but without IGET_CREATE set.
         */
        if (ip) {
                xfs_itrace_entry(ip);
                XFS_STATS_INC(vn_rele);
                XFS_STATS_INC(vn_remove);
                XFS_STATS_INC(vn_reclaim);
                XFS_STATS_DEC(vn_active);

                xfs_inactive(ip);
                xfs_iflags_clear(ip, XFS_IMODIFIED);
                if (xfs_reclaim(ip))
                        panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, inode);
        }

        ASSERT(XFS_I(inode) == NULL);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void            *data,
        void            (*syncer)(struct xfs_mount *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        filemap_flush(inode->i_mapping);
        iput(inode);
}

void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = ip->i_vnode;

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        sync_blockdev(mp->m_super->s_bdev);
        iput(inode);
}

void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void            *unused)
{
        int             error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY))
                error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR |
                                     SYNC_REFCACHE | SYNC_SUPER);
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

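/*
 * The per-mount sync daemon: sleep for the xfs_syncd_centisecs interval,
 * then drain the queued work items (re-queueing the periodic sync work
 * when the timeout expires) until asked to stop at unmount time.
 */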
STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD               (tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        kmem_free(work, sizeof(struct bhv_vfs_sync_work));
                }
        }

        return 0;
}

STATIC void
xfs_fs_put_super(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);
        int                     error;

        kthread_stop(mp->m_sync_task);

        xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
        error = xfs_unmount(mp, 0, NULL);
        if (error)
                printk("XFS: unmount got error=%d\n", error);
}

STATIC void
xfs_fs_write_super(
        struct super_block      *sb)
{
        if (!(sb->s_flags & MS_RDONLY))
                xfs_sync(XFS_M(sb), SYNC_FSDATA);
        sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
        struct super_block      *sb,
        int                     wait)
{
        struct xfs_mount        *mp = XFS_M(sb);
        int                     error;
        int                     flags;

        /*
         * Treat a sync operation like a freeze.  This is to work
         * around a race in sync_inodes() which works in two phases
         * - an asynchronous flush, which can write out an inode
         * without waiting for file size updates to complete, and a
         * synchronous flush, which won't do anything because the
         * async flush removed the inode's dirty flag.  Also
         * sync_inodes() will not see any files that just have
         * outstanding transactions to be flushed because we don't
         * dirty the Linux inode until after the transaction I/O
         * completes.
         */
        if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
                /*
                 * First stage of freeze - no more writers will make progress
                 * now we are here, so we flush delwri and delalloc buffers
                 * here, then wait for all I/O to complete.  Data is frozen at
                 * that point. Metadata is not frozen, transactions can still
                 * occur here so don't bother flushing the buftarg (i.e
                 * SYNC_QUIESCE) because it'll just get dirty again.
                 */
                flags = SYNC_DATA_QUIESCE;
        } else
                flags = SYNC_FSDATA;

        error = xfs_sync(mp, flags);
        sb->s_dirt = 0;

        if (unlikely(laptop_mode)) {
                int     prev_sync_seq = mp->m_sync_seq;

                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                wake_up_process(mp->m_sync_task);
                /*
                 * We have to wait for the sync iteration to complete.
                 * If we don't, the disk activity caused by the sync
                 * will come after the sync is completed, and that
                 * triggers another sync from laptop mode.
                 */
                wait_event(mp->m_wait_single_sync_task,
                                mp->m_sync_seq != prev_sync_seq);
        }

        return -error;
}

STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        struct xfs_mount        *mp = XFS_M(dentry->d_sb);
        xfs_sb_t                *sbp = &mp->m_sb;
        __uint64_t              fakeinos, id;
        xfs_extlen_t            lsize;

        statp->f_type = XFS_SB_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;

        id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);

        xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);

        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
        lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
        statp->f_blocks = sbp->sb_dblocks - lsize;
        statp->f_bfree = statp->f_bavail =
                                sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
        fakeinos = statp->f_bfree << sbp->sb_inopblog;
#if XFS_BIG_INUMS
        fakeinos += mp->m_inoadd;
#endif
        statp->f_files =
            MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
        if (mp->m_maxicount)
#if XFS_BIG_INUMS
                if (!mp->m_inoadd)
#endif
                        statp->f_files = min_t(typeof(statp->f_files),
                                                statp->f_files,
                                                mp->m_maxicount);
        statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
        spin_unlock(&mp->m_sb_lock);

        XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp);
        return 0;
}

STATIC int
xfs_fs_remount(
        struct super_block      *sb,
        int                     *flags,
        char                    *options)
{
        struct xfs_mount        *mp = XFS_M(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, 0);
        int                     error;

        error = xfs_parseargs(mp, options, args, 1);
        if (!error)
                error = xfs_mntupdate(mp, flags, args);
        kmem_free(args, sizeof(*args));
        return -error;
}

/*
 * Second stage of a freeze.  The data is already frozen so we only
 * need to take care of the metadata.  Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC void
xfs_fs_lockfs(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_attr_quiesce(mp);
        xfs_fs_log_dummy(mp);
}

STATIC int
xfs_fs_show_options(
        struct seq_file         *m,
        struct vfsmount         *mnt)
{
        return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

STATIC int
xfs_fs_quotasync(
        struct super_block      *sb,
        int                     type)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
        struct super_block      *sb,
        struct fs_quota_stat    *fqs)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
        struct super_block      *sb,
        unsigned int            flags,
        int                     op)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb),
                                 (type == USRQUOTA) ? Q_XGETQUOTA :
                                  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
                                   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb),
                                 (type == USRQUOTA) ? Q_XSETQLIM :
                                  ((type == GRPQUOTA) ? Q_XSETGQLIM :
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_fill_super(
        struct super_block      *sb,
        void                    *data,
        int                     silent)
{
        struct inode            *rootvp;
        struct xfs_mount        *mp = NULL;
        struct xfs_mount_args   *args = xfs_args_allocate(sb, silent);
        int                     error;

        mp = xfs_mount_init();

        INIT_LIST_HEAD(&mp->m_sync_list);
        spin_lock_init(&mp->m_sync_lock);
        init_waitqueue_head(&mp->m_wait_single_sync_task);

        mp->m_super = sb;
        sb->s_fs_info = mp;

        if (sb->s_flags & MS_RDONLY)
                mp->m_flags |= XFS_MOUNT_RDONLY;

        error = xfs_parseargs(mp, (char *)data, args, 0);
        if (error)
                goto fail_vfsop;

        sb_min_blocksize(sb, BBSIZE);
        sb->s_export_op = &xfs_export_operations;
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;

        error = xfs_mount(mp, args, NULL);
        if (error)
                goto fail_vfsop;

        sb->s_dirt = 1;
        sb->s_magic = XFS_SB_MAGIC;
        sb->s_blocksize = mp->m_sb.sb_blocksize;
        sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = xfs_root(mp, &rootvp);
        if (error)
                goto fail_unmount;

        sb->s_root = d_alloc_root(vn_to_inode(rootvp));
        if (!sb->s_root) {
                error = ENOMEM;
                goto fail_vnrele;
        }
        if (is_bad_inode(sb->s_root->d_inode)) {
                error = EINVAL;
                goto fail_vnrele;
        }

        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
        if (IS_ERR(mp->m_sync_task)) {
                error = -PTR_ERR(mp->m_sync_task);
                goto fail_vnrele;
        }

        xfs_itrace_exit(XFS_I(sb->s_root->d_inode));

        kmem_free(args, sizeof(*args));
        return 0;

fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
        } else {
                VN_RELE(rootvp);
        }

fail_unmount:
        xfs_unmount(mp, 0, NULL);

fail_vfsop:
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC int
xfs_fs_get_sb(
        struct file_system_type *fs_type,
        int                     flags,
        const char              *dev_name,
        void                    *data,
        struct vfsmount         *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
                           mnt);
}

static struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .write_inode            = xfs_fs_write_inode,
        .clear_inode            = xfs_fs_clear_inode,
        .put_super              = xfs_fs_put_super,
        .write_super            = xfs_fs_write_super,
        .sync_fs                = xfs_fs_sync_super,
        .write_super_lockfs     = xfs_fs_lockfs,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
};

static struct quotactl_ops xfs_quotactl_operations = {
        .quota_sync             = xfs_fs_quotasync,
        .get_xstate             = xfs_fs_getxstate,
        .set_xstate             = xfs_fs_setxstate,
        .get_xquota             = xfs_fs_getxquota,
        .set_xquota             = xfs_fs_setxquota,
};

static struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .get_sb                 = xfs_fs_get_sb,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};


STATIC int __init
init_xfs_fs( void )
{
        int                     error;
        static char             message[] __initdata = KERN_INFO \
                XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

        printk(message);

        ktrace_init(64);

        error = xfs_init_zones();
        if (error < 0)
                goto undo_zones;

        error = xfs_buf_init();
        if (error < 0)
                goto undo_buffers;

        vn_init();
        xfs_init();
        uuid_init();
        vfs_initquota();

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto undo_register;
        return 0;

undo_register:
        xfs_buf_terminate();

undo_buffers:
        xfs_destroy_zones();

undo_zones:
        return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
        xfs_buf_terminate();
        xfs_destroy_zones();
        ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");