/**
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/proc_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include "xattr.h"

static struct kmem_cache *f2fs_inode_cachep;

enum {
        Opt_gc_background_off,
        Opt_disable_roll_forward,
        Opt_discard,
        Opt_noheap,
        Opt_nouser_xattr,
        Opt_noacl,
        Opt_active_logs,
        Opt_disable_ext_identify,
        Opt_err,
};

static match_table_t f2fs_tokens = {
        {Opt_gc_background_off, "background_gc_off"},
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_discard, "discard"},
        {Opt_noheap, "no_heap"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
        {Opt_err, NULL},
};
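
/*
 * Illustrative use of the tokens above (the device and mount point here
 * are made-up examples, not part of this file):
 *
 *   mount -t f2fs -o background_gc_off,discard,active_logs=6 /dev/sdb1 /mnt/f2fs
 *
 * Note that active_logs only accepts 2, 4 or 6; see parse_options() below.
 */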

static void init_once(void *foo)
{
        struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

        memset(fi, 0, sizeof(*fi));
        inode_init_once(&fi->vfs_inode);
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
        struct f2fs_inode_info *fi;

        fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
        if (!fi)
                return NULL;

        init_once((void *) fi);

        /* Initialize f2fs-specific inode info */
        fi->vfs_inode.i_version = 1;
        atomic_set(&fi->dirty_dents, 0);
        fi->i_current_depth = 1;
        fi->i_advise = 0;
        rwlock_init(&fi->ext.ext_lock);

        set_inode_flag(fi, FI_NEW_INODE);

        return &fi->vfs_inode;
}

static void f2fs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void f2fs_put_super(struct super_block *sb)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);

        write_checkpoint(sbi, false, true);

        iput(sbi->node_inode);
        iput(sbi->meta_inode);

        /* destroy f2fs internal modules */
        destroy_node_manager(sbi);
        destroy_segment_manager(sbi);

        kfree(sbi->ckpt);

        sb->s_fs_info = NULL;
        brelse(sbi->raw_super_buf);
        kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        int ret = 0;

        if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
                return 0;

        if (sync)
                write_checkpoint(sbi, false, false);

        return ret;
}

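/*
 * Report space usage for statfs(2)/df: f_blocks covers every block from
 * the start of segment 0 to the end of the device, f_bfree subtracts the
 * blocks already holding valid user data plus the overprovision segments,
 * and f_bavail counts only what remains available for user data in the
 * main area.
 */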
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
        block_t total_count, user_block_count, start_count, ovp_count;

        total_count = le64_to_cpu(sbi->raw_super->block_count);
        user_block_count = sbi->user_block_count;
        start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
        ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
        buf->f_type = F2FS_SUPER_MAGIC;
        buf->f_bsize = sbi->blocksize;

        buf->f_blocks = total_count - start_count;
        buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
        buf->f_bavail = user_block_count - valid_user_blocks(sbi);

        buf->f_files = valid_inode_count(sbi);
        buf->f_ffree = sbi->total_node_count - valid_node_count(sbi);

        buf->f_namelen = F2FS_MAX_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);

        return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

        if (test_opt(sbi, BG_GC))
                seq_puts(seq, ",background_gc_on");
        else
                seq_puts(seq, ",background_gc_off");
        if (test_opt(sbi, DISABLE_ROLL_FORWARD))
                seq_puts(seq, ",disable_roll_forward");
        if (test_opt(sbi, DISCARD))
                seq_puts(seq, ",discard");
        if (test_opt(sbi, NOHEAP))
                seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
        if (test_opt(sbi, XATTR_USER))
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        if (test_opt(sbi, POSIX_ACL))
                seq_puts(seq, ",acl");
        else
                seq_puts(seq, ",noacl");
#endif
        if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
                seq_puts(seq, ",disable_ext_identify");

        seq_printf(seq, ",active_logs=%u", sbi->active_logs);

        return 0;
}

static struct super_operations f2fs_sops = {
        .alloc_inode = f2fs_alloc_inode,
        .destroy_inode = f2fs_destroy_inode,
        .write_inode = f2fs_write_inode,
        .show_options = f2fs_show_options,
        .evict_inode = f2fs_evict_inode,
        .put_super = f2fs_put_super,
        .sync_fs = f2fs_sync_fs,
        .statfs = f2fs_statfs,
};

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
                u64 ino, u32 generation)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;

        if (ino < F2FS_ROOT_INO(sbi))
                return ERR_PTR(-ESTALE);

        /*
         * f2fs_iget isn't quite right if the inode is currently unallocated!
         * However f2fs_iget currently does appropriate checks to handle stale
         * inodes so everything is OK.
         */
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (generation && inode->i_generation != generation) {
                /* we didn't find the right inode.. */
                iput(inode);
                return ERR_PTR(-ESTALE);
        }
        return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                        f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                        f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
        .fh_to_dentry = f2fs_fh_to_dentry,
        .fh_to_parent = f2fs_fh_to_parent,
        .get_parent = f2fs_get_parent,
};

static int parse_options(struct f2fs_sb_info *sbi, char *options)
{
        substring_t args[MAX_OPT_ARGS];
        char *p;
        int arg = 0;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;
                /*
                 * Initialize args struct so we know whether arg was
                 * found; some options take optional arguments.
                 */
                args[0].to = args[0].from = NULL;
                token = match_token(p, f2fs_tokens, args);

                switch (token) {
                case Opt_gc_background_off:
                        clear_opt(sbi, BG_GC);
                        break;
                case Opt_disable_roll_forward:
                        set_opt(sbi, DISABLE_ROLL_FORWARD);
                        break;
                case Opt_discard:
                        set_opt(sbi, DISCARD);
                        break;
                case Opt_noheap:
                        set_opt(sbi, NOHEAP);
                        break;
#ifdef CONFIG_F2FS_FS_XATTR
                case Opt_nouser_xattr:
                        clear_opt(sbi, XATTR_USER);
                        break;
#else
                case Opt_nouser_xattr:
                        pr_info("nouser_xattr option not supported\n");
                        break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
                case Opt_noacl:
                        clear_opt(sbi, POSIX_ACL);
                        break;
#else
                case Opt_noacl:
                        pr_info("noacl option not supported\n");
                        break;
#endif
                case Opt_active_logs:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg != 2 && arg != 4 && arg != 6)
                                return -EINVAL;
                        sbi->active_logs = arg;
                        break;
                case Opt_disable_ext_identify:
                        set_opt(sbi, DISABLE_EXT_IDENTIFY);
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

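/*
 * Compute the maximum file size from the block-pointer geometry: direct
 * pointers in the inode, two direct node blocks, two indirect node blocks
 * and one double indirect node block. A rough worked example, assuming
 * the usual constants ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK =
 * NIDS_PER_BLOCK = 1018 and 4KB blocks (bits = 12):
 *
 *   923 + 2*1018 + 2*1018^2 + 1018^3 = 1,057,053,439 addressable blocks
 *   1,057,053,439 << 12              ~= 3.94 TiB maximum file size
 */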
static loff_t max_file_size(unsigned bits)
{
        loff_t result = ADDRS_PER_INODE;
        loff_t leaf_count = ADDRS_PER_BLOCK;

        /* two direct node blocks */
        result += (leaf_count * 2);

        /* two indirect node blocks */
        leaf_count *= NIDS_PER_BLOCK;
        result += (leaf_count * 2);

        /* one double indirect node block */
        leaf_count *= NIDS_PER_BLOCK;
        result += leaf_count;

        result <<= bits;
        return result;
}

static int sanity_check_raw_super(struct f2fs_super_block *raw_super)
{
        unsigned int blocksize;

        if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic))
                return 1;

        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
        if (blocksize != PAGE_CACHE_SIZE)
                return 1;
        if (le32_to_cpu(raw_super->log_sectorsize) !=
                                        F2FS_LOG_SECTOR_SIZE)
                return 1;
        if (le32_to_cpu(raw_super->log_sectors_per_block) !=
                                        F2FS_LOG_SECTORS_PER_BLOCK)
                return 1;
        return 0;
}

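/*
 * Reject a checkpoint that claims more metadata segments (checkpoint,
 * SIT, NAT, SSA and reserved) than the volume contains in total.
 */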
static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
                                struct f2fs_checkpoint *ckpt)
{
        unsigned int total, fsmeta;

        total = le32_to_cpu(raw_super->segment_count);
        fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
        fsmeta += le32_to_cpu(raw_super->segment_count_sit);
        fsmeta += le32_to_cpu(raw_super->segment_count_nat);
        fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
        fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

        if (fsmeta >= total)
                return 1;
        return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *raw_super = sbi->raw_super;
        int i;

        sbi->log_sectors_per_block =
                le32_to_cpu(raw_super->log_sectors_per_block);
        sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
        sbi->blocksize = 1 << sbi->log_blocksize;
        sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
        sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
        sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
        sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
        sbi->total_sections = le32_to_cpu(raw_super->section_count);
        sbi->total_node_count =
                (le32_to_cpu(raw_super->segment_count_nat) / 2)
                        * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
        sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
        sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
        sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);

        for (i = 0; i < NR_COUNT_TYPE; i++)
                atomic_set(&sbi->nr_pages[i], 0);
}

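/*
 * Read the on-disk superblock and valid checkpoint, set up the in-memory
 * f2fs_sb_info, build the segment and node managers, recover orphan and
 * fsynced data if the previous checkpoint was not a clean umount, and
 * finally attach the root dentry and start the background GC thread.
 */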
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct f2fs_sb_info *sbi;
        struct f2fs_super_block *raw_super;
        struct buffer_head *raw_super_buf;
        struct inode *root;
        long err = -EINVAL;
        int i;

        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        /* set a temporary block size */
        if (!sb_set_blocksize(sb, F2FS_BLKSIZE))
                goto free_sbi;

        /* read f2fs raw super block */
        raw_super_buf = sb_bread(sb, 0);
        if (!raw_super_buf) {
                err = -EIO;
                goto free_sbi;
        }
        raw_super = (struct f2fs_super_block *)
                        ((char *)raw_super_buf->b_data + F2FS_SUPER_OFFSET);

        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;

        set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
        set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        set_opt(sbi, POSIX_ACL);
#endif
        /* parse mount options */
        if (parse_options(sbi, (char *)data))
                goto free_sb_buf;

        /* sanity checking of raw super */
        if (sanity_check_raw_super(raw_super))
                goto free_sb_buf;

        sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
        sb->s_max_links = F2FS_LINK_MAX;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));

        sb->s_op = &f2fs_sops;
        sb->s_xattr = f2fs_xattr_handlers;
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
        sb->s_fs_info = sbi;
        sb->s_time_gran = 1;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
        memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

        /* init f2fs-specific super block info */
        sbi->sb = sb;
        sbi->raw_super = raw_super;
        sbi->raw_super_buf = raw_super_buf;
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->write_inode);
        mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
        for (i = 0; i < NR_LOCK_TYPE; i++)
                mutex_init(&sbi->fs_lock[i]);
        sbi->por_doing = 0;
        spin_lock_init(&sbi->stat_lock);
        init_rwsem(&sbi->bio_sem);
        init_sb_info(sbi);

        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
                err = PTR_ERR(sbi->meta_inode);
                goto free_sb_buf;
        }

        err = get_valid_checkpoint(sbi);
        if (err)
                goto free_meta_inode;

        /* sanity checking of checkpoint */
        err = -EINVAL;
        if (sanity_check_ckpt(raw_super, sbi->ckpt))
                goto free_cp;

        sbi->total_valid_node_count =
                                le32_to_cpu(sbi->ckpt->valid_node_count);
        sbi->total_valid_inode_count =
                                le32_to_cpu(sbi->ckpt->valid_inode_count);
        sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
        sbi->total_valid_block_count =
                                le64_to_cpu(sbi->ckpt->valid_block_count);
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;
        INIT_LIST_HEAD(&sbi->dir_inode_list);
        spin_lock_init(&sbi->dir_inode_lock);

        /* init super block */
        if (!sb_set_blocksize(sb, sbi->blocksize))
                goto free_cp;

        init_orphan_info(sbi);

        /* setup f2fs internal modules */
        err = build_segment_manager(sbi);
        if (err)
                goto free_sm;
        err = build_node_manager(sbi);
        if (err)
                goto free_nm;

        build_gc_manager(sbi);

        /* get an inode for node space */
        sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
        if (IS_ERR(sbi->node_inode)) {
                err = PTR_ERR(sbi->node_inode);
                goto free_nm;
        }

        /* if there are any orphan nodes, free them */
        err = -EINVAL;
        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
                                recover_orphan_inodes(sbi))
                goto free_node_inode;

        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
                err = PTR_ERR(root);
                goto free_node_inode;
        }
        if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
                goto free_root_inode;

        sb->s_root = d_make_root(root); /* allocate root dentry */
        if (!sb->s_root) {
                err = -ENOMEM;
                goto free_root_inode;
        }

        /* recover fsynced data */
        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
                                !test_opt(sbi, DISABLE_ROLL_FORWARD))
                recover_fsync_data(sbi);

        /* After POR, we can run background GC thread */
        err = start_gc_thread(sbi);
        if (err)
                goto fail;

        err = f2fs_build_stats(sbi);
        if (err)
                goto fail;

        return 0;
fail:
        stop_gc_thread(sbi);
free_root_inode:
        dput(sb->s_root);
        sb->s_root = NULL;
free_node_inode:
        iput(sbi->node_inode);
free_nm:
        destroy_node_manager(sbi);
free_sm:
        destroy_segment_manager(sbi);
free_cp:
        kfree(sbi->ckpt);
free_meta_inode:
        make_bad_inode(sbi->meta_inode);
        iput(sbi->meta_inode);
free_sb_buf:
        brelse(raw_super_buf);
free_sbi:
        kfree(sbi);
        return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static struct file_system_type f2fs_fs_type = {
        .owner = THIS_MODULE,
        .name = "f2fs",
        .mount = f2fs_mount,
        .kill_sb = kill_block_super,
        .fs_flags = FS_REQUIRES_DEV,
};

static int init_inodecache(void)
{
        f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
                        sizeof(struct f2fs_inode_info), NULL);
        if (f2fs_inode_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
        int err;

        err = init_inodecache();
        if (err)
                goto fail;
        err = create_node_manager_caches();
        if (err)
                goto fail;
        err = create_gc_caches();
        if (err)
                goto fail;
        err = create_checkpoint_caches();
        if (err)
                goto fail;
        return register_filesystem(&f2fs_fs_type);
fail:
        return err;
}

static void __exit exit_f2fs_fs(void)
{
        destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        destroy_checkpoint_caches();
        destroy_gc_caches();
        destroy_node_manager_caches();
        destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");