/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;

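/*
 * Mark the filesystem read-only on an unrecoverable error: set CP_ERROR_FLAG
 * so no further checkpoint will be written, and flush merged bios unless we
 * are already in end_io context.
 */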
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	sbi->sb->s_flags |= MS_RDONLY;
	if (!end_io)
		f2fs_flush_merged_bios(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}

/*
 * We guarantee no failure on the returned page.
 */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
	};

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	if (f2fs_submit_page_bio(&fio)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	/*
	 * If there is any IO error when accessing the device, make our
	 * filesystem read-only and make sure we do not write a checkpoint
	 * with a non-uptodate meta page.
	 */
	if (unlikely(!PageUptodate(page)))
		f2fs_stop_checkpoint(sbi, false);
out:
	return page;
}

struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

/* for POR only */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

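/*
 * Check whether a block address lies inside the on-disk area that the given
 * meta type (NAT/SIT/SSA/CP/POR) is allowed to cover.
 */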
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
	};
	struct blk_plug plug;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		fio.old_blkaddr = fio.new_blkaddr;
		f2fs_submit_page_mbio(&fio);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	blk_finish_plug(&plug);
	return blkno - start;
}

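/* Trigger POR readahead only when the page at @index is absent or not uptodate. */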
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	bool readahead = false;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	write_meta_page(sbi, page);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, META, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff, written;

	/* collect a number of dirty meta pages and write together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);

	/* if mounting failed, skip writing node pages */
	mutex_lock(&sbi->cp_mutex);
	diff = nr_pages_to_write(sbi, META, wbc);
	blk_start_plug(&plug);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	blk_finish_plug(&plug);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

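/*
 * Write back up to @nr_to_write dirty meta pages; when the count is bounded,
 * stop as soon as the dirty pages stop being physically contiguous.
 */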
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec, 0);

	blk_start_plug(&plug);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (mapping->a_ops->writepage(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	blk_finish_plug(&plug);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

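/*
 * Per-type ino entry management: entries are kept in a radix tree plus a list
 * under im->ino_lock and track orphan/append/update inode numbers.
 */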
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (radix_tree_insert(&im->ino_root, ino, e)) {
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
		}
		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}
	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, type);
}

void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

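/* Drop every cached ino entry; include orphan entries only when @all is set. */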
void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		return -ENOSPC;
	}
#endif
	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
	update_inode_page(inode);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

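/*
 * Re-read an orphan inode from disk and drop it via iput(); if its node block
 * is still valid afterwards, keep the ino on the orphan list so the next
 * checkpoint can record it again.
 */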
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * it is a bug if we cannot find the inode referenced
		 * by an orphan entry.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	get_node_info(sbi, ino, &ni);

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		int err = acquire_orphan_inode(sbi);

		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: orphan failed (ino=%x), run fsck to fix.",
					__func__, ino);
			return err;
		}
		__add_ino_entry(sbi, ino, ORPHAN_INO);
	}
	return 0;
}

int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err;

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				return err;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
	return 0;
}

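/*
 * Pack the current orphan ino list into f2fs_orphan_block pages starting at
 * @start_blk, F2FS_ORPHANS_PER_BLOCK entries per block.
 */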
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write them into the orphan blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * when an orphan block is full of 1020 entries, we
			 * need to flush the current orphan block and bring
			 * another one into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

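/*
 * Read both blocks of one checkpoint pack and return the first page when
 * both pass CRC validation and carry the same version, or NULL otherwise.
 */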
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1, *cp_page_2 = NULL;
	unsigned long blk_size = sbi->blocksize;
	struct f2fs_checkpoint *cp_block;
	unsigned long long cur_version = 0, pre_version = 0;
	size_t crc_offset;
	__u32 crc = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_meta_page(sbi, cp_addr);

	/* get the version number */
	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = cur_cp_version(cp_block);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	cp_page_2 = get_meta_page(sbi, cp_addr);

	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = cur_cp_version(cp_block);

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

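/*
 * Load the newer of the two checkpoint packs into sbi->ckpt, including any
 * extra cp payload blocks, and sanity-check the result.
 */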
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	/* Sanity checking of checkpoint */
	if (sanity_check_ckpt(sbi))
		goto fail_no_cp;

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = get_meta_page(sbi, cp_blk_no + i);
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}

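/*
 * Dirty inode tracking: directories and (with the data_flush option) regular
 * files are linked on sbi->inode_list[] so checkpoint can write them back.
 */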
static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	SetPagePrivate(page);
	f2fs_trace_pid(page);
}

void remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * We should submit the bio, since there may be several
		 * dentry pages still under writeback in the inode being
		 * freed.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		cond_resched();
	}
	goto retry;
}

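/* Write back dirty inode metadata for inodes tracked on the DIRTY_META list. */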
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_entry(head->next, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
		goto retry_flush_dents;
	}

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush.
	 */
retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		err = sync_node_pages(sbi, &wbc);
		if (err) {
			f2fs_unlock_all(sbi);
			goto out;
		}
		goto retry_flush_nodes;
	}
out:
	blk_finish_plug(&plug);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);

	build_free_nids(sbi);
	f2fs_unlock_all(sbi);
}

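/* Wait until every in-flight writeback bio counted in nr_wb_bios completes. */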
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&sbi->nr_wb_bios))
			break;

		io_schedule_timeout(5*HZ);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

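/*
 * Write one checkpoint pack: flush remaining dirty meta pages, fill in the
 * f2fs_checkpoint block, emit orphan blocks and segment summaries, and wait
 * for everything to reach the disk before the final META_FLUSH.
 */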
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	nid_t last_nid = nm_i->next_scan_nid;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	struct super_block *sb = sbi->sb;
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META)) {
		sync_meta_pages(sbi, META, LONG_MAX);
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;
	}

	next_free_nid(sbi, &last_nid);

	/*
	 * modify checkpoint
	 * version number is already updated
	 */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = npages_for_summary_flush(sbi, false);
	spin_lock(&sbi->cp_lock);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	spin_unlock(&sbi->cp_lock);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	spin_lock(&sbi->cp_lock);
	if (cpc->reason == CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason == CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);

	spin_unlock(&sbi->cp_lock);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_addr(sbi);

	/* need to wait for end_io results */
	wait_on_all_pages_writeback(sbi);
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	/* write out checkpoint buffer at block 0 */
	update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	if (sb->s_bdev->bd_part)
		kbytes_written += BD_PART_WRITTEN(sbi);

	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* writeout checkpoint block */
	update_meta_page(sbi, ckpt, start_blk);

	/* wait for previously submitted node/meta pages writeback */
	wait_on_all_pages_writeback(sbi);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
	filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we have only one bio carrying the CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

	/* wait for previously submitted meta pages writeback */
	wait_on_all_pages_writeback(sbi);

	release_ino_entry(sbi, false);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	clear_prefree_segments(sbi, cpc);
	clear_sbi_flag(sbi, SBI_IS_DIRTY);
	clear_sbi_flag(sbi, SBI_NEED_CP);

	/*
	 * redirty superblock if metadata like node page or inode cache is
	 * updated during writing checkpoint.
	 */
	if (get_pages(sbi, F2FS_DIRTY_NODES) ||
			get_pages(sbi, F2FS_DIRTY_IMETA))
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));

	return 0;
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	mutex_lock(&sbi->cp_mutex);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
		(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}
	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_bios(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
		f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
		f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
		f2fs_bug_on(sbi, prefree_segments(sbi));
		flush_sit_entries(sbi, cpc);
		clear_prefree_segments(sbi, cpc);
		f2fs_wait_all_discard_bio(sbi);
		unblock_operations(sbi);
		goto out;
	}

	/*
	 * update checkpoint pack index:
	 * increase the version number so that SIT entries and seg
	 * summaries are written to the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	flush_nat_entries(sbi);
	flush_sit_entries(sbi, cpc);

	/* unlock all the fs_lock[] in do_checkpoint() */
	err = do_checkpoint(sbi, cpc);

	f2fs_wait_all_discard_bio(sbi);

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason == CP_RECOVERY)
		f2fs_msg(sbi->sb, KERN_NOTICE,
			"checkpoint: version = %llx", ckpt_ver);

	/* do checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	mutex_unlock(&sbi->cp_mutex);
	return err;
}

void init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}