/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

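/*
 * A sketch of the intended call pattern (illustrative; not a verbatim
 * caller from this file): paths that are about to dirty node or dentry
 * pages call f2fs_balance_fs(sbi) first, so that foreground GC can
 * reclaim a free section before the shortage gets worse. Note that
 * there is no matching unlock above, so f2fs_gc() is expected to
 * release gc_mutex itself before returning.
 */
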
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * This must not fail with an error such as -ENOMEM:
 * adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
			dirty_i->nr_dirty[PRE]--;

		/* Let's use trim */
		if (test_opt(sbi, DISCARD))
			blkdev_issue_discard(sbi->sb->s_bdev,
					START_BLOCK(sbi, segno) <<
					sbi->log_sectors_per_block,
					1 << (sbi->log_sectors_per_block +
					sbi->log_blocks_per_seg),
					GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

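/*
 * Discard arithmetic above, worked through for one assumed geometry
 * (4KB blocks, 512-byte device sectors, 512 blocks per segment, i.e.
 * log_sectors_per_block == 3 and log_blocks_per_seg == 9):
 *
 *	sector   = START_BLOCK(sbi, segno) << 3;
 *	nr_sects = 1 << (3 + 9);	/\* 4096 sectors == one 2MB segment *\/
 *
 * so every prefree segment is trimmed as a single segment-sized discard.
 */
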
static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}

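/*
 * Example of the "del" convention used by update_sit_entry(): moving a
 * block from old_blkaddr to new_blkaddr calls it with +1 for the new
 * address (set the cur_valid_map bit, bump valid_blocks) and -1 for the
 * old one (clear the bit, drop the count). For a freshly allocated
 * block, old_blkaddr is NEW_ADDR, which maps to NULL_SEGNO, so only the
 * +1 side runs.
 */
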
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with the curseg_mutex lock held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

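/*
 * Worked example for the calculation above: the first compacted page
 * also carries the two journals, so it holds only sum_in_page entries,
 * while later pages reserve just the footer. With 4KB pages and 7-byte
 * summaries (the usual sizes), that is several hundred entries per
 * page, so three data logs of up to 512 blocks each always fit in at
 * most the three pages this function can return.
 */
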
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must return with success; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

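/*
 * Allocation policy recap for the search above: first try the next free
 * segment inside the current section; failing that, scan the free-section
 * bitmap from the hint (leftward when ALLOC_LEFT), and finally prefer a
 * zone not used by any other current log, retrying via find_other_zone
 * with a moved hint until only an already-shared zone remains.
 */
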
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in the LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	block_t ofs;
	for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
		if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
			&& !f2fs_test_bit(ofs, se->cur_valid_map))
			break;
	}
	seg->next_blkoff = ofs;
}

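/*
 * Example scan, assuming an 8-block segment for illustration:
 *
 *	ckpt_valid_map:	1 1 0 0 0 0 0 0		(valid at last checkpoint)
 *	cur_valid_map:	1 0 1 0 0 0 0 0		(valid now)
 *	start = 0	=>	next_blkoff = 3
 *
 * SSR may only reuse an offset that is free in *both* bitmaps, since a
 * block still valid in the checkpointed image must survive a crash.
 */
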
/*
 * If a segment is written in the LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in the SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * the SSR manner, so it needs to recover the existing segment information of
 * its valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must return with success; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

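/*
 * Decision ladder above, in order: (1) "force" always opens a segment in
 * a new section; (2) the warm node log always takes a fresh segment;
 * (3) an LFS log whose physically next segment is free keeps writing
 * sequentially; (4) under free-space pressure, reuse a dirty segment via
 * SSR; (5) otherwise fall back to a new free segment.
 */
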
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);

	if (!get_pages(p->sbi, F2FS_WRITEBACK) && p->sbi->cp_task)
		wake_up_process(p->sbi->cp_task);

	kfree(p);
	bio_put(bio);
}

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void do_submit_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}

static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}

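/*
 * Merging behaviour, by example: writing blocks 100, 101 and 102 of the
 * same page type accumulates all three in sbi->bio[type], because each
 * call sees last_block_in_bio == blk_addr - 1 and bio_add_page()
 * succeeds. A jump to block 200, or a bio that is already full, submits
 * the pending bio first and then starts a new one at the new sector.
 */
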
void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_bio(sbi, type, sync);
		wait_on_page_writeback(page);
	}
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

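/*
 * Temperature mapping implemented by the six-log variant above:
 *
 *	DATA:	directory page			-> CURSEG_HOT_DATA
 *		cold page or cold file		-> CURSEG_COLD_DATA
 *		anything else			-> CURSEG_WARM_DATA
 *	NODE:	direct node, not cold		-> CURSEG_HOT_NODE
 *		direct node, cold		-> CURSEG_WARM_NODE
 *		indirect node			-> CURSEG_COLD_NODE
 */
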
static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry should be called under the curseg_mutex, because
	 * this function updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

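/*
 * Ordering notes for do_write_page(): curseg_mutex is taken before
 * sentry_lock; the summary entry is added before the block offset
 * advances; and the SIT is refreshed before any segment change so that
 * SSR victim selection sees up-to-date valid-block counts. The page is
 * submitted only once *new_blkaddr is final.
 */
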
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

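/*
 * On-disk layout parsed above (compacted summaries):
 *
 *	page 0:  [ NAT journal ][ SIT journal ][ summary entries ... ]
 *	page 1+: [ summary entries ...                               ]
 *
 * with SUM_FOOTER_SIZE bytes left unused at the end of each page. An
 * SSR log restores all blocks_per_seg entries, since its valid blocks
 * may sit beyond next_blkoff.
 */
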
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

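/*
 * Typical use (e.g. from flush_sit_entries() below):
 *
 *	offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
 *
 * which returns the slot already holding segno, or a newly allocated
 * slot while journal space remains, or -1 so that the caller falls back
 * to updating the on-disk SIT block itself.
 */
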
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary block is full of sit
	 * entries, all of them are flushed out; otherwise the journal could
	 * not accept newly hot sit entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}

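/*
 * Net effect of flush_sit_entries(): if the journal was already full,
 * flush_sits_in_journal() dirtied every journaled entry and each one is
 * rewritten into its SIT block; otherwise dirty entries are parked in
 * the journal while slots remain, and only the overflow touches SIT
 * block pages, each written to the alternate SIT copy via
 * get_next_sit_page().
 */
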
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int start;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *se = &sit_i->sentries[start];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		struct page *page;
		int i;

		mutex_lock(&curseg->curseg_mutex);
		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
				sit = sit_in_journal(sum, i);
				mutex_unlock(&curseg->curseg_mutex);
				goto got_it;
			}
		}
		mutex_unlock(&curseg->curseg_mutex);
		page = get_current_sit_page(sbi, start);
		sit_blk = (struct f2fs_sit_block *)page_address(page);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
		f2fs_put_page(page, 1);
got_it:
		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);
		if (sbi->segs_per_sec > 1) {
			struct sec_entry *e = get_sec_entry(sbi, start);
			e->valid_blocks += se->valid_blocks;
		}
	}
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* mark the current segments as in use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}