/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"

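/*
 * Slab cache backing struct inode_entry, the list nodes used to keep
 * victim inodes pinned during data GC (see add_gc_inode() and
 * put_gc_inode() below).
 */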
static struct kmem_cache *winode_slab;

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		f2fs_balance_fs(sbi);

		if (!test_opt(sbi, BG_GC))
			continue;

		/*
		 * [GC triggering conditions]
		 * 0. GC is not already running.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we must avoid triggering GC too frequently, since
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions. So we wait a while to let dirty
		 * segments accumulate.
		 */
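		/*
		 * wait_ms adapts between GC_THREAD_MIN_SLEEP_TIME and
		 * GC_THREAD_MAX_SLEEP_TIME below: it grows while the device
		 * is busy or invalid blocks are scarce, shrinks when there
		 * is plenty to clean, and jumps to GC_THREAD_NOGC_SLEEP_TIME
		 * after a round that collected nothing.
		 */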
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

		sbi->bg_gc++;

		if (f2fs_gc(sbi) == GC_NONE)
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;

	} while (!kthread_should_stop());
	return 0;
}

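/*
 * Create the background GC thread. The start/stop pairing suggests this
 * is called from the mount path, with stop_gc_thread() as its unmount
 * counterpart; the exact caller context is outside this file.
 */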
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
				GC_THREAD_NAME);
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		/* don't leave a dangling pointer for stop_gc_thread() */
		sbi->gc_thread = NULL;
		kfree(gc_th);
		return -ENOMEM;
	}
	return 0;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

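/*
 * Background GC can afford the cost-benefit policy, which weighs a
 * section's age against its utilization; foreground GC runs on the
 * allocation critical path, so it greedily picks whatever frees the
 * most space the fastest.
 */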
static int select_gc_type(int gc_type)
{
	return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
}

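/*
 * SSR allocation (p->alloc_mode != 0) always searches greedily within
 * the per-type dirty segment list one segment at a time, while LFS-mode
 * GC searches the generic DIRTY list in whole-section units
 * (segs_per_sec).
 */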
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim segments that
	 * were selected by background GC earlier. Those segments are
	 * guaranteed to have few valid blocks.
	 */
	segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
						TOTAL_SEGS(sbi), 0);
	if (segno < TOTAL_SEGS(sbi)) {
		clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
		return segno;
	}
	return NULL_SEGNO;
}

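/*
 * Cost-benefit victim cost, in the spirit of the classic LFS cleaning
 * policy: reclaiming a section yields (100 - u) percent of its space at
 * a cost of roughly (100 + u) (read the section, write the live data
 * back), and older (colder) data is more worth moving. With utilization
 * u and age both scaled to 0..100, benefit/cost is
 * (100 - u) * age / (100 + u); subtracting it from UINT_MAX lets the
 * caller simply minimize the returned value. Illustrative example:
 * u = 20, age = 50 gives UINT_MAX - (100 * 80 * 50) / 120, i.e.
 * UINT_MAX - 3333.
 */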
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
					struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
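/*
 * The scan below resumes from last_victim[gc_mode] and wraps around
 * once; it skips segments already claimed as victims and segments in
 * the current section, and gives up after MAX_VICTIM_SEARCH candidates,
 * remembering where it stopped for the next call.
 */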
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int segno;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;

		if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
			continue;
		if (gc_type == BG_GC &&
				test_bit(segno, dirty_i->victim_segmap[BG_GC]))
			continue;
		if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == get_max_cost(sbi, &p))
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
got_it:
	if (p.min_segno != NULL_SEGNO) {
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
		if (p.alloc_mode == LFS) {
			int i;
			for (i = 0; i < p.ofs_unit; i++)
				set_bit(*result + i,
					dirty_i->victim_segmap[gc_type]);
		}
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

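/*
 * Victim inodes discovered during the data GC phases are pinned on an
 * ilist so each inode is looked up (and referenced) only once per GC
 * round; put_gc_inode() drops all the references when f2fs_gc()
 * finishes.
 */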
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode->i_ino == ino)
			return ie->inode;
	}
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *new_ie, *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode == inode) {
			iput(inode);
			return;
		}
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret ? GC_OK : GC_NEXT;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is valid and is migrated
 * with cold status; otherwise (an invalid node) it is ignored.
 */
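/*
 * gc_node_segment() walks the summary entries twice: the initial pass
 * only issues readahead for the victim node pages, and the second pass
 * dirties each still-valid node page so that node writeback migrates
 * it. For FG_GC, the dirty node pages are then synced out immediately.
 */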
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		int err;

		/*
		 * Make sure there are enough free segments left to write
		 * out all the dirty node pages between this checkpoint and
		 * the next one. If not, stop and trigger a checkpoint.
		 */
		if (should_do_checkpoint(sbi)) {
			mutex_lock(&sbi->cp_mutex);
			block_operations(sbi);
			return GC_BLOCKED;
		}

		err = check_valid_map(sbi, segno, off);
		if (err == GC_NEXT)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set the page dirty and let writeback migrate it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}
	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);
	}
	return GC_DONE;
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: callers must pass node offsets of direct node blocks
 * only. Passing an offset that points to any other type of node block,
 * such as an indirect or double indirect node block, is a caller bug.
 */
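/*
 * Node offsets within an inode are laid out as: 0 = the inode itself,
 * 1..2 = the two direct node blocks, 3 = the first indirect node block
 * followed by its NIDS_PER_BLOCK direct children, then the second
 * indirect node block and its children (through offset
 * 2 * NIDS_PER_BLOCK + 4), and finally the double indirect subtree.
 * Example: node_ofs = 4 is the first direct child of the first indirect
 * node, i.e. the third direct node block overall, so bidx = 2 and the
 * result is 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE.
 */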
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

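/*
 * Verify that the block at @blkaddr is still owned by the dnode named
 * in the summary entry: the node's version must match the NAT and the
 * dnode must still point at @blkaddr. Any mismatch means the block was
 * reallocated, and GC_NEXT tells the caller to skip it.
 */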
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return GC_NEXT;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return GC_NEXT;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return GC_NEXT;
	return GC_OK;
}

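/*
 * For BG_GC, migration is lazy: the page is merely marked dirty and
 * cold so regular writeback moves it later. For FG_GC, the page is
 * written out right away under the DATA_WRITE lock, since space must
 * be reclaimed before the GC round completes.
 */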
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (page->mapping != inode->i_mapping)
		goto out;

	if (inode != page->mapping->host)
		goto out;

	if (PageWriteback(page))
		goto out;

	if (gc_type == BG_GC) {
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
		mutex_lock_op(sbi, DATA_WRITE);
		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		mutex_unlock_op(sbi, DATA_WRITE);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
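/*
 * gc_data_segment() makes four passes over the summary entries:
 * phase 0 reads ahead the dnode pages, phase 1 reads ahead the owning
 * inodes' node pages, phase 2 grabs the inodes and their data pages
 * (pinning the inodes on @ilist), and phase 3 actually moves the data
 * pages.
 */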
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int err, off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/*
		 * Make sure there are enough free segments left to write
		 * out all the dirty node pages between this checkpoint and
		 * the next one. If not, stop and trigger a checkpoint.
		 */
		if (should_do_checkpoint(sbi)) {
			mutex_lock(&sbi->cp_mutex);
			block_operations(sbi);
			err = GC_BLOCKED;
			goto stop;
		}

		err = check_valid_map(sbi, segno, off);
		if (err == GC_NEXT)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino while checking validity */
		err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
		if (err == GC_NEXT)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget_nowait(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}
	if (++phase < 4)
		goto next_step;
	err = GC_DONE;
stop:
	if (gc_type == FG_GC)
		f2fs_submit_bio(sbi, DATA, true);
	return err;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	int ret = GC_DONE;

	/* read the segment summary of the victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return GC_ERROR;

	/*
	 * CP needs to lock sum_page. At this point we don't need to keep
	 * it locked, because the summary page won't go anywhere and won't
	 * be updated until GC is done.
	 */
	unlock_page(sum_page);
	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return ret;
}

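/*
 * f2fs_gc() is entered with gc_mutex held (see the trylock in
 * gc_thread_func()) and releases it on exit. It picks one victim
 * section, collects every segment in it, and, while free sections
 * remain scarce, writes a checkpoint and repeats; scarce free sections
 * also upgrade the round from BG_GC to FG_GC.
 */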
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int gc_status = GC_NONE;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (has_not_enough_free_secs(sbi))
		gc_type = FG_GC;

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;

	for (i = 0; i < sbi->segs_per_sec; i++) {
		/*
		 * do_garbage_collect() returns one of three gc_status
		 * values: GC_ERROR, GC_DONE, or GC_BLOCKED.
		 * If GC finished uncleanly, we have to return the victim
		 * to the dirty segment list.
		 */
		gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
		if (gc_status != GC_DONE)
			break;
	}
	if (has_not_enough_free_secs(sbi)) {
		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
		if (has_not_enough_free_secs(sbi))
			goto gc_more;
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return gc_status;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}