/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *orphan_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}

	/* We wait for writeback only inside grab_meta_page() */
	wait_on_page_writeback(page);
	SetPageUptodate(page);
	return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	if (f2fs_submit_page_bio(sbi, page, index,
				READ_SYNC | REQ_META | REQ_PRIO))
		goto repeat;

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
out:
	mark_page_accessed(page);
	return page;
}

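/*
 * Return the number of readable meta blocks for the given area type.
 * SSA and CP pages are addressed by raw block number, so 0 is returned
 * for them and callers apply no upper bound.
 */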
inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
{
	switch (type) {
	case META_NAT:
		return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
	case META_SIT:
		return SIT_BLK_CNT(sbi);
	case META_SSA:
	case META_CP:
		return 0;
	default:
		BUG();
	}
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
{
	block_t prev_blk_addr = 0;
	struct page *page;
	int blkno = start;
	int max_blks = get_max_meta_blks(sbi, type);

	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; nrpages-- > 0; blkno++) {
		block_t blk_addr;

		switch (type) {
		case META_NAT:
			/* get nat block addr */
			if (unlikely(blkno >= max_blks))
				blkno = 0;
			blk_addr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			if (unlikely(blkno >= max_blks))
				goto out;
			blk_addr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			if (blkno != start && prev_blk_addr + 1 != blk_addr)
				goto out;
			prev_blk_addr = blk_addr;
			break;
		case META_SSA:
		case META_CP:
			/* get ssa/cp block addr */
			blk_addr = blkno;
			break;
		default:
			BUG();
		}

		page = grab_cache_page(META_MAPPING(sbi), blk_addr);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}

		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}

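/*
 * Write back a single dirty meta page. The page is redirtied during
 * recovery or reclaim-driven writeback, and the write is skipped
 * entirely once a checkpoint error has been recorded.
 */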
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (wbc->for_reclaim)
		goto redirty_out;

	/* Should not write any meta pages if an IO error has occurred */
	if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
		goto no_write;

	wait_on_page_writeback(page);
	write_meta_page(sbi, page);
no_write:
	dec_page_count(sbi, F2FS_DIRTY_META);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_META);
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

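/*
 * Write back dirty meta pages in batches; small sets of dirty pages are
 * left in the cache so they can later be merged into a larger write.
 */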
static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	int nrpages = nr_pages_to_skip(sbi, META);
	long written;

	/* collect a number of dirty meta pages and write them together */
	if (wbc->for_kupdate || get_pages(sbi, F2FS_DIRTY_META) < nrpages)
		goto skip_write;

	/* if mounting failed, skip writing meta pages */
	mutex_lock(&sbi->cp_mutex);
	written = sync_meta_pages(sbi, META, nrpages);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write -= written;
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	return 0;
}

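/*
 * Walk the dirty meta pages with a pagevec and write up to nr_to_write
 * of them, then submit any bio still merged for this page type.
 */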
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (f2fs_write_meta_page(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	return nwritten;
}

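/*
 * Mark a meta page dirty and account for it; returns 1 only when the
 * page was not already dirty.
 */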
static int f2fs_set_meta_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, META);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_META);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage = f2fs_write_meta_page,
	.writepages = f2fs_write_meta_pages,
	.set_page_dirty = f2fs_set_meta_page_dirty,
};

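/*
 * Reserve one slot in the orphan list, failing with -ENOSPC once
 * max_orphans is reached.
 */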
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	int err = 0;

	spin_lock(&sbi->orphan_inode_lock);
	if (unlikely(sbi->n_orphans >= sbi->max_orphans))
		err = -ENOSPC;
	else
		sbi->n_orphans++;
	spin_unlock(&sbi->orphan_inode_lock);

	return err;
}

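/* Drop a reservation taken by acquire_orphan_inode(). */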
void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->orphan_inode_lock);
	f2fs_bug_on(sbi->n_orphans == 0);
	sbi->n_orphans--;
	spin_unlock(&sbi->orphan_inode_lock);
}

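/*
 * Insert an orphan entry for @ino, keeping the list sorted by inode
 * number and ignoring duplicates.
 */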
void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head, *this;
	struct orphan_inode_entry *new = NULL, *orphan = NULL;

	new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
	new->ino = ino;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each(this, head) {
		orphan = list_entry(this, struct orphan_inode_entry, list);
		if (orphan->ino == ino) {
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, new);
			return;
		}

		if (orphan->ino > ino)
			break;
		orphan = NULL;
	}

	/* add the new entry into the list, which is sorted by inode number */
	if (orphan)
		list_add(&new->list, this->prev);
	else
		list_add_tail(&new->list, head);
	spin_unlock(&sbi->orphan_inode_lock);
}

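/* Remove the orphan entry for @ino from the list, if it is present. */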
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head;
	struct orphan_inode_entry *orphan;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			list_del(&orphan->list);
			kmem_cache_free(orphan_entry_slab, orphan);
			f2fs_bug_on(sbi->n_orphans == 0);
			sbi->n_orphans--;
			break;
		}
	}
	spin_unlock(&sbi->orphan_inode_lock);
}

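/*
 * Drop the last link of an orphan inode so that its data is truncated
 * and freed by the final iput().
 */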
static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode = f2fs_iget(sbi->sb, ino);
	f2fs_bug_on(IS_ERR(inode));
	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);
}

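/*
 * Replay the orphan blocks recorded in the current CP pack and release
 * every inode listed there.
 */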
void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blkaddr, i, j;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return;

	sbi->por_doing = true;
	start_blk = __start_cp_addr(sbi) + 1;
	orphan_blkaddr = __start_sum_addr(sbi) - 1;

	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

	for (i = 0; i < orphan_blkaddr; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			recover_orphan_inode(sbi, ino);
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
	sbi->por_doing = false;
	return;
}

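/*
 * Pack the in-memory orphan list into on-disk orphan blocks starting
 * at @start_blk, F2FS_ORPHANS_PER_BLOCK entries per block.
 */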
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index;
	unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
		(F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
	struct page *page = NULL;
	struct orphan_inode_entry *orphan = NULL;

	for (index = 0; index < orphan_blocks; index++)
		grab_meta_page(sbi, start_blk + index);

	index = 1;
	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;

	/* loop over each orphan inode entry and write them to orphan blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = find_get_page(META_MAPPING(sbi), start_blk++);
			f2fs_bug_on(!page);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
			f2fs_put_page(page, 0);
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * an orphan block is full of 1020 entries, so we
			 * need to flush the current orphan block and bring
			 * another one into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}

	spin_unlock(&sbi->orphan_inode_lock);
}

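/*
 * Validate one CP pack by checking the CRCs and version numbers of its
 * first and last blocks; return the first cp page on success, with the
 * pack version in *version.
 */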
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1, *cp_page_2 = NULL;
	unsigned long blk_size = sbi->blocksize;
	struct f2fs_checkpoint *cp_block;
	unsigned long long cur_version = 0, pre_version = 0;
	size_t crc_offset;
	__u32 crc = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_meta_page(sbi, cp_addr);

	/* get the version number */
	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = cur_cp_version(cp_block);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	cp_page_2 = get_meta_page(sbi, cp_addr);

	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = cur_cp_version(cp_block);

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

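/*
 * Read both CP packs and keep a copy of the newer valid checkpoint
 * in sbi->ckpt.
 */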
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;

	sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}

static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct list_head *head = &sbi->dir_inode_list;
	struct list_head *this;

	list_for_each(this, head) {
		struct dir_inode_entry *entry;
		entry = list_entry(this, struct dir_inode_entry, list);
		if (unlikely(entry->inode == inode))
			return -EEXIST;
	}
	list_add_tail(&new->list, head);
	stat_inc_dirty_dir(sbi);
	return 0;
}

void set_dirty_dir_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new;

	if (!S_ISDIR(inode->i_mode))
		return;

	new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	if (__add_dirty_inode(inode, new))
		kmem_cache_free(inode_entry_slab, new);

	inode_inc_dirty_dents(inode);
	SetPagePrivate(page);
	spin_unlock(&sbi->dir_inode_lock);
}

void add_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new =
			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);

	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	if (__add_dirty_inode(inode, new))
		kmem_cache_free(inode_entry_slab, new);
	spin_unlock(&sbi->dir_inode_lock);
}

void remove_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct list_head *this, *head;

	if (!S_ISDIR(inode->i_mode))
		return;

	spin_lock(&sbi->dir_inode_lock);
	if (get_dirty_dents(inode)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}

	head = &sbi->dir_inode_list;
	list_for_each(this, head) {
		struct dir_inode_entry *entry;
		entry = list_entry(this, struct dir_inode_entry, list);
		if (entry->inode == inode) {
			list_del(&entry->list);
			kmem_cache_free(inode_entry_slab, entry);
			stat_dec_dirty_dir(sbi);
			break;
		}
	}
	spin_unlock(&sbi->dir_inode_lock);

	/* Only from the recovery routine */
	if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
		clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
		iput(inode);
	}
}

struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *this, *head;
	struct inode *inode = NULL;

	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	list_for_each(this, head) {
		struct dir_inode_entry *entry;
		entry = list_entry(this, struct dir_inode_entry, list);
		if (entry->inode->i_ino == ino) {
			inode = entry->inode;
			break;
		}
	}
	spin_unlock(&sbi->dir_inode_lock);
	return inode;
}

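/*
 * Write back the dirty dentry pages of every inode on the dirty
 * directory list until the list is drained.
 */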
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
	struct list_head *head;
	struct dir_inode_entry *entry;
	struct inode *inode;
retry:
	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	if (list_empty(head)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}
	entry = list_entry(head->next, struct dir_inode_entry, list);
	inode = igrab(entry->inode);
	spin_unlock(&sbi->dir_inode_lock);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * may still be under writeback in the inode being freed.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
	goto retry;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static void block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		sync_dirty_dir_inodes(sbi);
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing the nat/sit flush.
	 */
retry_flush_nodes:
	mutex_lock(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		mutex_unlock(&sbi->node_write);
		sync_node_pages(sbi, 0, &wbc);
		goto retry_flush_nodes;
	}
	blk_finish_plug(&plug);
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	mutex_unlock(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WRITEBACK))
			break;

		io_schedule();
	}
	finish_wait(&sbi->cp_wait, &wait);
}

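/*
 * Flush the NAT/SIT pages and write out the full CP pack: the leading
 * checkpoint block, orphan blocks, segment summaries, and the trailing
 * checkpoint block that commits the pack.
 */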
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	nid_t last_nid = 0;
	block_t start_blk;
	struct page *cp_page;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	void *kaddr;
	int i;

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META))
		sync_meta_pages(sbi, META, LONG_MAX);

	next_free_nid(sbi, &last_nid);

	/*
	 * modify checkpoint
	 * version number is already updated
	 */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < 3; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < 3; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = npages_for_summary_flush(sbi);
	if (data_sum_blocks < 3)
		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

	orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
					/ F2FS_ORPHANS_PER_BLOCK;
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);

	if (is_umount) {
		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
			data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
	} else {
		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
			data_sum_blocks + orphan_blocks);
	}

	if (sbi->n_orphans)
		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_addr(sbi);

	/* write out checkpoint buffer at block 0 */
	cp_page = grab_meta_page(sbi, start_blk++);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	if (sbi->n_orphans) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;
	if (is_umount) {
		write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* write out the trailing checkpoint block */
	cp_page = grab_meta_page(sbi, start_blk);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	/* wait for previously submitted node/meta page writeback */
	wait_on_all_pages_writeback(sbi);

	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
	filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	/* Here, we have only one bio carrying the CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

	if (unlikely(!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
		clear_prefree_segments(sbi);
		F2FS_RESET_SB_DIRT(sbi);
	}
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");

	mutex_lock(&sbi->cp_mutex);
	block_operations(sbi);

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");

	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written in the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to the NAT/SIT area */
	flush_nat_entries(sbi);
	flush_sit_entries(sbi);

	/* unlock all the fs_lock[] in do_checkpoint() */
	do_checkpoint(sbi, is_umount);

	unblock_operations(sbi);
	mutex_unlock(&sbi->cp_mutex);

	stat_inc_cp_count(sbi->stat_info);
	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
}

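/* Initialize the in-memory orphan bookkeeping and its upper bound. */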
void init_orphan_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->orphan_inode_lock);
	INIT_LIST_HEAD(&sbi->orphan_inode_list);
	sbi->n_orphans = 0;
	/*
	 * considering 512 blocks in a segment, 8 blocks are needed for the
	 * cp and the log segment summaries. The remaining blocks are used
	 * to keep orphan entries. With the limitation of one reserved
	 * segment for the cp pack, we can have at most 1020 * 504 orphan
	 * entries.
	 */
	sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
				* F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
	orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
			sizeof(struct orphan_inode_entry));
	if (!orphan_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
			sizeof(struct dir_inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(orphan_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(orphan_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}