// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

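/*
 * Background GC thread: sleeps for an adaptive interval and runs one GC
 * round when the filesystem looks idle, when it is woken via gc_wake, or
 * when a foreground waiter is queued on fggc_wq (GC_MERGE mount option).
 */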
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

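/* Allocate the GC context and launch the "f2fs_gc-%u:%u" kernel thread. */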
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

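/* Stop the GC thread, wake any foreground GC waiters and free the context. */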
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

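/*
 * Map the GC type (and any user-selected idle/urgent gc_mode) to a victim
 * selection algorithm: greedy, cost-benefit or age-threshold (ATGC).
 */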
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

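/*
 * Fill in the victim selection policy: which dirty bitmap to scan, how many
 * candidates to examine (max_search) and at which offset to start scanning.
 */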
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
			(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

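/* Return the worst possible cost, used to initialize p->min_cost. */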
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

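/*
 * Cost-benefit cost of a section: combines utilization (ratio of valid
 * blocks) with age, so that older, mostly-invalid sections are preferred.
 */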
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

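/* Return the cost of a candidate segment according to the selected policy. */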
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

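/*
 * Record a candidate section for ATGC: compute its average mtime, update the
 * global and dirty mtime bounds, and insert it into the mtime-sorted tree.
 * Sections younger than the age threshold are not recorded.
 */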
static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

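/*
 * Pick the ATGC victim: walk the mtime-sorted candidates and choose the one
 * with the lowest combined cost of invalid-block ratio and weighted age.
 */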
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
						DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
		(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
		(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}
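
/* Dispatch age-based victim lookup for GC_AT selection or AT_SSR allocation. */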
static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

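/* Drop all ATGC candidate entries and reset the candidate tree. */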
static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

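/* Return whether the block at (segno, offset) is still valid in the SIT. */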
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

Nicholas Krause | c107989 | 2015-06-30 21:37:21 -0400 | [diff] [blame] | 997 | static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 998 | struct node_info *dni, block_t blkaddr, unsigned int *nofs) |
| 999 | { |
| 1000 | struct page *node_page; |
| 1001 | nid_t nid; |
| 1002 | unsigned int ofs_in_node; |
| 1003 | block_t source_blkaddr; |
| 1004 | |
| 1005 | nid = le32_to_cpu(sum->nid); |
| 1006 | ofs_in_node = le16_to_cpu(sum->ofs_in_node); |
| 1007 | |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1008 | node_page = f2fs_get_node_page(sbi, nid); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1009 | if (IS_ERR(node_page)) |
Nicholas Krause | c107989 | 2015-06-30 21:37:21 -0400 | [diff] [blame] | 1010 | return false; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1011 | |
Chao Yu | 7735730 | 2018-07-17 00:02:17 +0800 | [diff] [blame] | 1012 | if (f2fs_get_node_info(sbi, nid, dni)) { |
| 1013 | f2fs_put_page(node_page, 1); |
| 1014 | return false; |
| 1015 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1016 | |
| 1017 | if (sum->version != dni->version) { |
Joe Perches | dcbb4c1 | 2019-06-18 17:48:42 +0800 | [diff] [blame] | 1018 | f2fs_warn(sbi, "%s: valid data with mismatched node version.", |
| 1019 | __func__); |
Jaegeuk Kim | c13ff37 | 2017-03-21 10:59:50 -0400 | [diff] [blame] | 1020 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1021 | } |
| 1022 | |
| 1023 | *nofs = ofs_of_node(node_page); |
Chao Yu | a2ced1c | 2020-02-14 17:44:10 +0800 | [diff] [blame] | 1024 | source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1025 | f2fs_put_page(node_page, 1); |
| 1026 | |
Sahitya Tummala | bbf9f7d | 2019-08-07 19:10:32 +0530 | [diff] [blame] | 1027 | if (source_blkaddr != blkaddr) { |
| 1028 | #ifdef CONFIG_F2FS_CHECK_FS |
| 1029 | unsigned int segno = GET_SEGNO(sbi, blkaddr); |
| 1030 | unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); |
| 1031 | |
| 1032 | if (unlikely(check_valid_map(sbi, segno, offset))) { |
| 1033 | if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { |
Joe Perches | b5a393c | 2021-05-26 13:05:36 -0700 | [diff] [blame] | 1034 | f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u", |
| 1035 | blkaddr, source_blkaddr, segno); |
Sahitya Tummala | bbf9f7d | 2019-08-07 19:10:32 +0530 | [diff] [blame] | 1036 | f2fs_bug_on(sbi, 1); |
| 1037 | } |
| 1038 | } |
| 1039 | #endif |
Nicholas Krause | c107989 | 2015-06-30 21:37:21 -0400 | [diff] [blame] | 1040 | return false; |
Sahitya Tummala | bbf9f7d | 2019-08-07 19:10:32 +0530 | [diff] [blame] | 1041 | } |
Nicholas Krause | c107989 | 2015-06-30 21:37:21 -0400 | [diff] [blame] | 1042 | return true; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1043 | } |
| 1044 | |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1045 | static int ra_data_block(struct inode *inode, pgoff_t index) |
| 1046 | { |
| 1047 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 1048 | struct address_space *mapping = inode->i_mapping; |
| 1049 | struct dnode_of_data dn; |
| 1050 | struct page *page; |
| 1051 | struct extent_info ei = {0, 0, 0}; |
| 1052 | struct f2fs_io_info fio = { |
| 1053 | .sbi = sbi, |
| 1054 | .ino = inode->i_ino, |
| 1055 | .type = DATA, |
| 1056 | .temp = COLD, |
| 1057 | .op = REQ_OP_READ, |
| 1058 | .op_flags = 0, |
| 1059 | .encrypted_page = NULL, |
| 1060 | .in_list = false, |
| 1061 | .retry = false, |
| 1062 | }; |
| 1063 | int err; |
| 1064 | |
| 1065 | page = f2fs_grab_cache_page(mapping, index, true); |
| 1066 | if (!page) |
| 1067 | return -ENOMEM; |
| 1068 | |
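| | /* fast path: resolve the block address from the extent cache when possible */ |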
| 1069 | if (f2fs_lookup_extent_cache(inode, index, &ei)) { |
| 1070 | dn.data_blkaddr = ei.blk + index - ei.fofs; |
Chao Yu | 93770ab | 2019-04-15 15:26:32 +0800 | [diff] [blame] | 1071 | if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, |
| 1072 | DATA_GENERIC_ENHANCE_READ))) { |
Chao Yu | 10f966b | 2019-06-20 11:36:14 +0800 | [diff] [blame] | 1073 | err = -EFSCORRUPTED; |
Chao Yu | 93770ab | 2019-04-15 15:26:32 +0800 | [diff] [blame] | 1074 | goto put_page; |
| 1075 | } |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1076 | goto got_it; |
| 1077 | } |
| 1078 | |
| 1079 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
| 1080 | err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); |
| 1081 | if (err) |
| 1082 | goto put_page; |
| 1083 | f2fs_put_dnode(&dn); |
| 1084 | |
Chao Yu | 93770ab | 2019-04-15 15:26:32 +0800 | [diff] [blame] | 1085 | if (!__is_valid_data_blkaddr(dn.data_blkaddr)) { |
| 1086 | err = -ENOENT; |
| 1087 | goto put_page; |
| 1088 | } |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1089 | if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, |
Chao Yu | 93770ab | 2019-04-15 15:26:32 +0800 | [diff] [blame] | 1090 | DATA_GENERIC_ENHANCE))) { |
Chao Yu | 10f966b | 2019-06-20 11:36:14 +0800 | [diff] [blame] | 1091 | err = -EFSCORRUPTED; |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1092 | goto put_page; |
| 1093 | } |
| 1094 | got_it: |
| 1095 | /* read page */ |
| 1096 | fio.page = page; |
| 1097 | fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; |
| 1098 | |
Yunlong Song | 9bf1a3f | 2018-09-18 20:39:53 +0800 | [diff] [blame] | 1099 | /* |
| 1100 | * don't cache encrypted data in the meta inode until the previous dirty |
| 1101 | * data has been written back, to avoid racing between GC and flush. |
| 1102 | */ |
Chao Yu | bae0ee7 | 2018-12-25 17:43:42 +0800 | [diff] [blame] | 1103 | f2fs_wait_on_page_writeback(page, DATA, true, true); |
Yunlong Song | 9bf1a3f | 2018-09-18 20:39:53 +0800 | [diff] [blame] | 1104 | |
| 1105 | f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); |
| 1106 | |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1107 | fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi), |
| 1108 | dn.data_blkaddr, |
| 1109 | FGP_LOCK | FGP_CREAT, GFP_NOFS); |
| 1110 | if (!fio.encrypted_page) { |
| 1111 | err = -ENOMEM; |
| 1112 | goto put_page; |
| 1113 | } |
| 1114 | |
| 1115 | err = f2fs_submit_page_bio(&fio); |
| 1116 | if (err) |
| 1117 | goto put_encrypted_page; |
| 1118 | f2fs_put_page(fio.encrypted_page, 0); |
| 1119 | f2fs_put_page(page, 1); |
Chao Yu | 8b83ac8 | 2020-04-16 18:16:56 +0800 | [diff] [blame] | 1120 | |
| 1121 | f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); |
Chao Yu | 9c12238 | 2020-04-23 18:03:06 +0800 | [diff] [blame] | 1122 | f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); |
Chao Yu | 8b83ac8 | 2020-04-16 18:16:56 +0800 | [diff] [blame] | 1123 | |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1124 | return 0; |
| 1125 | put_encrypted_page: |
| 1126 | f2fs_put_page(fio.encrypted_page, 1); |
| 1127 | put_page: |
| 1128 | f2fs_put_page(page, 1); |
| 1129 | return err; |
| 1130 | } |
| 1131 | |
Jaegeuk Kim | d4c759e | 2017-09-05 17:04:35 -0700 | [diff] [blame] | 1132 | /* |
| 1133 | * Move a data block via META_MAPPING while keeping the data page locked. |
| 1134 | * This can be used to move blocks, aka LBAs, directly on disk. |
| 1135 | */ |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1136 | static int move_data_block(struct inode *inode, block_t bidx, |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1137 | int gc_type, unsigned int segno, int off) |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1138 | { |
| 1139 | struct f2fs_io_info fio = { |
| 1140 | .sbi = F2FS_I_SB(inode), |
Chao Yu | 39d787b | 2017-09-29 13:59:38 +0800 | [diff] [blame] | 1141 | .ino = inode->i_ino, |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1142 | .type = DATA, |
Jaegeuk Kim | a912b54 | 2017-05-10 11:18:25 -0700 | [diff] [blame] | 1143 | .temp = COLD, |
Mike Christie | 04d328d | 2016-06-05 14:31:55 -0500 | [diff] [blame] | 1144 | .op = REQ_OP_READ, |
Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 1145 | .op_flags = 0, |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1146 | .encrypted_page = NULL, |
Chao Yu | fb830fc | 2017-05-19 23:37:01 +0800 | [diff] [blame] | 1147 | .in_list = false, |
Chao Yu | fe16efe | 2018-05-28 23:47:18 +0800 | [diff] [blame] | 1148 | .retry = false, |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1149 | }; |
| 1150 | struct dnode_of_data dn; |
| 1151 | struct f2fs_summary sum; |
| 1152 | struct node_info ni; |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1153 | struct page *page, *mpage; |
Chao Yu | 4356e48 | 2016-02-23 17:52:43 +0800 | [diff] [blame] | 1154 | block_t newaddr; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1155 | int err = 0; |
Chao Yu | b0332a0 | 2020-02-14 17:44:12 +0800 | [diff] [blame] | 1156 | bool lfs_mode = f2fs_lfs_mode(fio.sbi); |
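| | /* |
| | * Allocate from the ATGC (age-threshold GC) log only for background GC |
| | * while urgent GC is not requested; otherwise use the cold data log. |
| | */ |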
Weichao Guo | f7dca84 | 2021-03-17 17:27:23 +0800 | [diff] [blame] | 1157 | int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) && |
| 1158 | (fio.sbi->gc_mode != GC_URGENT_HIGH) ? |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1159 | CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA; |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1160 | |
| 1161 | /* do not read out */ |
Jaegeuk Kim | a56c7c6 | 2015-10-09 15:11:38 -0700 | [diff] [blame] | 1162 | page = f2fs_grab_cache_page(inode->i_mapping, bidx, false); |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1163 | if (!page) |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1164 | return -ENOMEM; |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1165 | |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1166 | if (!check_valid_map(F2FS_I_SB(inode), segno, off)) { |
| 1167 | err = -ENOENT; |
Yunlei He | 2061471 | 2016-11-07 21:22:31 +0800 | [diff] [blame] | 1168 | goto out; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1169 | } |
Yunlei He | 2061471 | 2016-11-07 21:22:31 +0800 | [diff] [blame] | 1170 | |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1171 | if (f2fs_is_atomic_file(inode)) { |
| 1172 | F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++; |
| 1173 | F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1174 | err = -EAGAIN; |
Chao Yu | 5fe4574 | 2017-01-07 18:50:26 +0800 | [diff] [blame] | 1175 | goto out; |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1176 | } |
Chao Yu | 5fe4574 | 2017-01-07 18:50:26 +0800 | [diff] [blame] | 1177 | |
Jaegeuk Kim | 1ad71a2 | 2017-12-07 16:25:39 -0800 | [diff] [blame] | 1178 | if (f2fs_is_pinned_file(inode)) { |
| 1179 | f2fs_pin_file_control(inode, true); |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1180 | err = -EAGAIN; |
Jaegeuk Kim | 1ad71a2 | 2017-12-07 16:25:39 -0800 | [diff] [blame] | 1181 | goto out; |
| 1182 | } |
| 1183 | |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1184 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1185 | err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE); |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1186 | if (err) |
| 1187 | goto out; |
| 1188 | |
Chao Yu | 08b39fb | 2015-10-08 13:27:34 +0800 | [diff] [blame] | 1189 | if (unlikely(dn.data_blkaddr == NULL_ADDR)) { |
| 1190 | ClearPageUptodate(page); |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1191 | err = -ENOENT; |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1192 | goto put_out; |
Chao Yu | 08b39fb | 2015-10-08 13:27:34 +0800 | [diff] [blame] | 1193 | } |
| 1194 | |
| 1195 | /* |
| 1196 | * don't cache encrypted data in the meta inode until the previous dirty |
| 1197 | * data has been written back, to avoid racing between GC and flush. |
| 1198 | */ |
Chao Yu | bae0ee7 | 2018-12-25 17:43:42 +0800 | [diff] [blame] | 1199 | f2fs_wait_on_page_writeback(page, DATA, true, true); |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1200 | |
Yunlong Song | 9bf1a3f | 2018-09-18 20:39:53 +0800 | [diff] [blame] | 1201 | f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); |
| 1202 | |
Chao Yu | 7735730 | 2018-07-17 00:02:17 +0800 | [diff] [blame] | 1203 | err = f2fs_get_node_info(fio.sbi, dn.nid, &ni); |
| 1204 | if (err) |
| 1205 | goto put_out; |
| 1206 | |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1207 | /* read page */ |
| 1208 | fio.page = page; |
Chao Yu | 7a9d754 | 2016-02-22 18:36:38 +0800 | [diff] [blame] | 1209 | fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1210 | |
Chao Yu | 107a805 | 2018-05-26 09:00:13 +0800 | [diff] [blame] | 1211 | if (lfs_mode) |
| 1212 | down_write(&fio.sbi->io_order_lock); |
| 1213 | |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1214 | mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), |
| 1215 | fio.old_blkaddr, false); |
Chao Yu | d7cd370 | 2020-07-01 10:27:09 +0800 | [diff] [blame] | 1216 | if (!mpage) { |
| 1217 | err = -ENOMEM; |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1218 | goto up_out; |
Chao Yu | d7cd370 | 2020-07-01 10:27:09 +0800 | [diff] [blame] | 1219 | } |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1220 | |
| 1221 | fio.encrypted_page = mpage; |
| 1222 | |
| 1223 | /* read source block in mpage */ |
| 1224 | if (!PageUptodate(mpage)) { |
| 1225 | err = f2fs_submit_page_bio(&fio); |
| 1226 | if (err) { |
| 1227 | f2fs_put_page(mpage, 1); |
| 1228 | goto up_out; |
| 1229 | } |
Chao Yu | 8b83ac8 | 2020-04-16 18:16:56 +0800 | [diff] [blame] | 1230 | |
| 1231 | f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); |
Chao Yu | 9c12238 | 2020-04-23 18:03:06 +0800 | [diff] [blame] | 1232 | f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); |
Chao Yu | 8b83ac8 | 2020-04-16 18:16:56 +0800 | [diff] [blame] | 1233 | |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1234 | lock_page(mpage); |
| 1235 | if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || |
| 1236 | !PageUptodate(mpage))) { |
| 1237 | err = -EIO; |
| 1238 | f2fs_put_page(mpage, 1); |
| 1239 | goto up_out; |
| 1240 | } |
| 1241 | } |
| 1242 | |
Chao Yu | e12d322 | 2020-12-30 16:38:35 +0800 | [diff] [blame] | 1243 | set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version); |
| 1244 | |
| 1245 | /* allocate block address */ |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1246 | f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1247 | &sum, type, NULL); |
Chao Yu | 4356e48 | 2016-02-23 17:52:43 +0800 | [diff] [blame] | 1248 | |
Chao Yu | 01eccef | 2017-10-28 16:52:30 +0800 | [diff] [blame] | 1249 | fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), |
| 1250 | newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); |
Chao Yu | 4356e48 | 2016-02-23 17:52:43 +0800 | [diff] [blame] | 1251 | if (!fio.encrypted_page) { |
| 1252 | err = -ENOMEM; |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1253 | f2fs_put_page(mpage, 1); |
Chao Yu | 4356e48 | 2016-02-23 17:52:43 +0800 | [diff] [blame] | 1254 | goto recover_block; |
| 1255 | } |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1256 | |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1257 | /* write target block */ |
Chao Yu | bae0ee7 | 2018-12-25 17:43:42 +0800 | [diff] [blame] | 1258 | f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1259 | memcpy(page_address(fio.encrypted_page), |
| 1260 | page_address(mpage), PAGE_SIZE); |
| 1261 | f2fs_put_page(mpage, 1); |
| 1262 | invalidate_mapping_pages(META_MAPPING(fio.sbi), |
| 1263 | fio.old_blkaddr, fio.old_blkaddr); |
Chao Yu | ec3ea14 | 2021-05-20 19:51:50 +0800 | [diff] [blame^] | 1264 | f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr); |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1265 | |
Chao Yu | 8d64d36 | 2018-12-12 18:12:30 +0800 | [diff] [blame] | 1266 | set_page_dirty(fio.encrypted_page); |
Jaegeuk Kim | 6282adb | 2015-07-25 00:29:17 -0700 | [diff] [blame] | 1267 | if (clear_page_dirty_for_io(fio.encrypted_page)) |
| 1268 | dec_page_count(fio.sbi, F2FS_DIRTY_META); |
| 1269 | |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1270 | set_page_writeback(fio.encrypted_page); |
Jaegeuk Kim | 17c5003 | 2018-04-11 23:09:04 -0700 | [diff] [blame] | 1271 | ClearPageError(page); |
Jaegeuk Kim | 548aeda | 2015-07-13 17:44:14 -0700 | [diff] [blame] | 1272 | |
Mike Christie | 04d328d | 2016-06-05 14:31:55 -0500 | [diff] [blame] | 1273 | fio.op = REQ_OP_WRITE; |
Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 1274 | fio.op_flags = REQ_SYNC; |
Chao Yu | 4356e48 | 2016-02-23 17:52:43 +0800 | [diff] [blame] | 1275 | fio.new_blkaddr = newaddr; |
Chao Yu | fe16efe | 2018-05-28 23:47:18 +0800 | [diff] [blame] | 1276 | f2fs_submit_page_write(&fio); |
| 1277 | if (fio.retry) { |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1278 | err = -EAGAIN; |
Sheng Yong | a9d572c | 2018-01-17 12:11:31 +0800 | [diff] [blame] | 1279 | if (PageWriteback(fio.encrypted_page)) |
| 1280 | end_page_writeback(fio.encrypted_page); |
| 1281 | goto put_page_out; |
| 1282 | } |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1283 | |
Chao Yu | b0af6d4 | 2017-08-02 23:21:48 +0800 | [diff] [blame] | 1284 | f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE); |
| 1285 | |
Chao Yu | f28b343 | 2016-02-24 17:16:47 +0800 | [diff] [blame] | 1286 | f2fs_update_data_blkaddr(&dn, newaddr); |
Jaegeuk Kim | 9194232 | 2016-05-20 10:13:22 -0700 | [diff] [blame] | 1287 | set_inode_flag(inode, FI_APPEND_WRITE); |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1288 | if (page->index == 0) |
Jaegeuk Kim | 9194232 | 2016-05-20 10:13:22 -0700 | [diff] [blame] | 1289 | set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); |
Jaegeuk Kim | 548aeda | 2015-07-13 17:44:14 -0700 | [diff] [blame] | 1290 | put_page_out: |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1291 | f2fs_put_page(fio.encrypted_page, 1); |
Chao Yu | 4356e48 | 2016-02-23 17:52:43 +0800 | [diff] [blame] | 1292 | recover_block: |
| 1293 | if (err) |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1294 | f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, |
Chao Yu | c5d0278 | 2020-08-04 21:14:47 +0800 | [diff] [blame] | 1295 | true, true, true); |
Jaegeuk Kim | 543b8c4 | 2019-07-17 18:31:53 -0700 | [diff] [blame] | 1296 | up_out: |
| 1297 | if (lfs_mode) |
| 1298 | up_write(&fio.sbi->io_order_lock); |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1299 | put_out: |
| 1300 | f2fs_put_dnode(&dn); |
| 1301 | out: |
| 1302 | f2fs_put_page(page, 1); |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1303 | return err; |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1304 | } |
| 1305 | |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1306 | static int move_data_page(struct inode *inode, block_t bidx, int gc_type, |
Yunlei He | 2061471 | 2016-11-07 21:22:31 +0800 | [diff] [blame] | 1307 | unsigned int segno, int off) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1308 | { |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1309 | struct page *page; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1310 | int err = 0; |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1311 | |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1312 | page = f2fs_get_lock_data_page(inode, bidx, true); |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1313 | if (IS_ERR(page)) |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1314 | return PTR_ERR(page); |
Fan Li | 63a0b7cb | 2013-12-09 16:09:00 +0800 | [diff] [blame] | 1315 | |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1316 | if (!check_valid_map(F2FS_I_SB(inode), segno, off)) { |
| 1317 | err = -ENOENT; |
Yunlei He | 2061471 | 2016-11-07 21:22:31 +0800 | [diff] [blame] | 1318 | goto out; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1319 | } |
Yunlei He | 2061471 | 2016-11-07 21:22:31 +0800 | [diff] [blame] | 1320 | |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1321 | if (f2fs_is_atomic_file(inode)) { |
| 1322 | F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++; |
| 1323 | F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1324 | err = -EAGAIN; |
Chao Yu | 5fe4574 | 2017-01-07 18:50:26 +0800 | [diff] [blame] | 1325 | goto out; |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1326 | } |
Jaegeuk Kim | 1ad71a2 | 2017-12-07 16:25:39 -0800 | [diff] [blame] | 1327 | if (f2fs_is_pinned_file(inode)) { |
| 1328 | if (gc_type == FG_GC) |
| 1329 | f2fs_pin_file_control(inode, true); |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1330 | err = -EAGAIN; |
Jaegeuk Kim | 1ad71a2 | 2017-12-07 16:25:39 -0800 | [diff] [blame] | 1331 | goto out; |
| 1332 | } |
Chao Yu | 5fe4574 | 2017-01-07 18:50:26 +0800 | [diff] [blame] | 1333 | |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1334 | if (gc_type == BG_GC) { |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1335 | if (PageWriteback(page)) { |
| 1336 | err = -EAGAIN; |
Jaegeuk Kim | 4ebefc4 | 2013-03-31 13:49:18 +0900 | [diff] [blame] | 1337 | goto out; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1338 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1339 | set_page_dirty(page); |
Chao Yu | bdc14e1 | 2021-04-28 17:20:31 +0800 | [diff] [blame] | 1340 | set_page_private_gcing(page); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1341 | } else { |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1342 | struct f2fs_io_info fio = { |
| 1343 | .sbi = F2FS_I_SB(inode), |
Chao Yu | 39d787b | 2017-09-29 13:59:38 +0800 | [diff] [blame] | 1344 | .ino = inode->i_ino, |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1345 | .type = DATA, |
Jaegeuk Kim | a912b54 | 2017-05-10 11:18:25 -0700 | [diff] [blame] | 1346 | .temp = COLD, |
Mike Christie | 04d328d | 2016-06-05 14:31:55 -0500 | [diff] [blame] | 1347 | .op = REQ_OP_WRITE, |
Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 1348 | .op_flags = REQ_SYNC, |
Hou Pengyang | e959c8f | 2017-04-25 12:45:13 +0000 | [diff] [blame] | 1349 | .old_blkaddr = NULL_ADDR, |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1350 | .page = page, |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1351 | .encrypted_page = NULL, |
Jaegeuk Kim | cc15620 | 2017-05-12 13:51:34 -0700 | [diff] [blame] | 1352 | .need_lock = LOCK_REQ, |
Chao Yu | b0af6d4 | 2017-08-02 23:21:48 +0800 | [diff] [blame] | 1353 | .io_type = FS_GC_DATA_IO, |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1354 | }; |
Chao Yu | 72e1c79 | 2016-07-03 22:05:13 +0800 | [diff] [blame] | 1355 | bool is_dirty = PageDirty(page); |
Chao Yu | 72e1c79 | 2016-07-03 22:05:13 +0800 | [diff] [blame] | 1356 | |
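| | /* |
| | * For FG_GC, write the data page out synchronously via the GC I/O path, |
| | * retrying on transient -ENOMEM. |
| | */ |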
| 1357 | retry: |
Chao Yu | bae0ee7 | 2018-12-25 17:43:42 +0800 | [diff] [blame] | 1358 | f2fs_wait_on_page_writeback(page, DATA, true, true); |
Chao Yu | 8d64d36 | 2018-12-12 18:12:30 +0800 | [diff] [blame] | 1359 | |
| 1360 | set_page_dirty(page); |
Chao Yu | 933439c | 2016-10-11 22:57:01 +0800 | [diff] [blame] | 1361 | if (clear_page_dirty_for_io(page)) { |
Jaegeuk Kim | a7ffdbe | 2014-09-12 15:53:45 -0700 | [diff] [blame] | 1362 | inode_dec_dirty_pages(inode); |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1363 | f2fs_remove_dirty_inode(inode); |
Chao Yu | 933439c | 2016-10-11 22:57:01 +0800 | [diff] [blame] | 1364 | } |
Chao Yu | 72e1c79 | 2016-07-03 22:05:13 +0800 | [diff] [blame] | 1365 | |
Chao Yu | bdc14e1 | 2021-04-28 17:20:31 +0800 | [diff] [blame] | 1366 | set_page_private_gcing(page); |
Chao Yu | 72e1c79 | 2016-07-03 22:05:13 +0800 | [diff] [blame] | 1367 | |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1368 | err = f2fs_do_write_data_page(&fio); |
Chao Yu | 14a2855 | 2018-05-28 16:59:27 +0800 | [diff] [blame] | 1369 | if (err) { |
Chao Yu | bdc14e1 | 2021-04-28 17:20:31 +0800 | [diff] [blame] | 1370 | clear_page_private_gcing(page); |
Chao Yu | 14a2855 | 2018-05-28 16:59:27 +0800 | [diff] [blame] | 1371 | if (err == -ENOMEM) { |
Chao Yu | 5df7731f | 2020-02-17 17:45:44 +0800 | [diff] [blame] | 1372 | congestion_wait(BLK_RW_ASYNC, |
| 1373 | DEFAULT_IO_TIMEOUT); |
Chao Yu | 14a2855 | 2018-05-28 16:59:27 +0800 | [diff] [blame] | 1374 | goto retry; |
| 1375 | } |
| 1376 | if (is_dirty) |
| 1377 | set_page_dirty(page); |
Chao Yu | 72e1c79 | 2016-07-03 22:05:13 +0800 | [diff] [blame] | 1378 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1379 | } |
| 1380 | out: |
| 1381 | f2fs_put_page(page, 1); |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1382 | return err; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1383 | } |
| 1384 | |
Jaegeuk Kim | 0a8165d | 2012-11-29 13:28:09 +0900 | [diff] [blame] | 1385 | /* |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1386 | * This function tries to get the parent node of the victim data block and |
| 1387 | * checks the data block's validity. If the block is valid, it is copied with |
| 1388 | * cold status and the parent node is updated. |
| 1389 | * If the parent node is not valid or the data block address differs, |
| 1390 | * the victim data block is ignored. |
| 1391 | */ |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1392 | static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1393 | struct gc_inode_list *gc_list, unsigned int segno, int gc_type, |
| 1394 | bool force_migrate) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1395 | { |
| 1396 | struct super_block *sb = sbi->sb; |
| 1397 | struct f2fs_summary *entry; |
| 1398 | block_t start_addr; |
Jaegeuk Kim | 4372752 | 2013-02-04 15:11:17 +0900 | [diff] [blame] | 1399 | int off; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1400 | int phase = 0; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1401 | int submitted = 0; |
Aravind Ramesh | de881df | 2020-07-16 18:26:56 +0530 | [diff] [blame] | 1402 | unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1403 | |
| 1404 | start_addr = START_BLOCK(sbi, segno); |
| 1405 | |
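| | /* |
| | * The summary entries are scanned in five phases: phase 0 readaheads NAT |
| | * blocks, phase 1 readaheads node pages, phase 2 readaheads the owning |
| | * inodes' node pages, phase 3 grabs the inodes and readaheads their data |
| | * pages, and phase 4 moves the data blocks. |
| | */ |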
| 1406 | next_step: |
| 1407 | entry = sum; |
Jaegeuk Kim | c718379b | 2013-04-24 13:19:56 +0900 | [diff] [blame] | 1408 | |
Aravind Ramesh | de881df | 2020-07-16 18:26:56 +0530 | [diff] [blame] | 1409 | for (off = 0; off < usable_blks_in_seg; off++, entry++) { |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1410 | struct page *data_page; |
| 1411 | struct inode *inode; |
| 1412 | struct node_info dni; /* dnode info for the data */ |
| 1413 | unsigned int ofs_in_node, nofs; |
| 1414 | block_t start_bidx; |
Chao Yu | 7ea984b | 2016-08-27 00:14:31 +0800 | [diff] [blame] | 1415 | nid_t nid = le32_to_cpu(entry->nid); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1416 | |
Jaegeuk Kim | 803e74b | 2019-11-22 12:02:06 -0800 | [diff] [blame] | 1417 | /* |
| 1418 | * stop BG_GC if there are not enough free sections. |
| 1419 | * Or, stop GC if the segment has become fully valid due to a |
| 1420 | * race condition with SSR block allocation. |
| 1421 | */ |
| 1422 | if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1423 | (!force_migrate && get_valid_blocks(sbi, segno, true) == |
| 1424 | BLKS_PER_SEC(sbi))) |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1425 | return submitted; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1426 | |
Jaegeuk Kim | 4372752 | 2013-02-04 15:11:17 +0900 | [diff] [blame] | 1427 | if (check_valid_map(sbi, segno, off) == 0) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1428 | continue; |
| 1429 | |
| 1430 | if (phase == 0) { |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1431 | f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, |
Chao Yu | 7ea984b | 2016-08-27 00:14:31 +0800 | [diff] [blame] | 1432 | META_NAT, true); |
| 1433 | continue; |
| 1434 | } |
| 1435 | |
| 1436 | if (phase == 1) { |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1437 | f2fs_ra_node_page(sbi, nid); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1438 | continue; |
| 1439 | } |
| 1440 | |
| 1441 | /* Get an inode by ino and check its validity */ |
Nicholas Krause | c107989 | 2015-06-30 21:37:21 -0400 | [diff] [blame] | 1442 | if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1443 | continue; |
| 1444 | |
Chao Yu | 7ea984b | 2016-08-27 00:14:31 +0800 | [diff] [blame] | 1445 | if (phase == 2) { |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1446 | f2fs_ra_node_page(sbi, dni.ino); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1447 | continue; |
| 1448 | } |
| 1449 | |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1450 | ofs_in_node = le16_to_cpu(entry->ofs_in_node); |
| 1451 | |
Chao Yu | 7ea984b | 2016-08-27 00:14:31 +0800 | [diff] [blame] | 1452 | if (phase == 3) { |
Jaegeuk Kim | d4686d56 | 2013-01-31 15:36:04 +0900 | [diff] [blame] | 1453 | inode = f2fs_iget(sb, dni.ino); |
Jaegeuk Kim | 4eea93e | 2019-12-20 17:20:05 -0800 | [diff] [blame] | 1454 | if (IS_ERR(inode) || is_bad_inode(inode)) { |
| 1455 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1456 | continue; |
Jaegeuk Kim | 4eea93e | 2019-12-20 17:20:05 -0800 | [diff] [blame] | 1457 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1458 | |
Chao Yu | bb06664 | 2017-11-03 10:21:05 +0800 | [diff] [blame] | 1459 | if (!down_write_trylock( |
Chao Yu | b2532c6 | 2018-04-24 10:55:28 +0800 | [diff] [blame] | 1460 | &F2FS_I(inode)->i_gc_rwsem[WRITE])) { |
Chao Yu | bb06664 | 2017-11-03 10:21:05 +0800 | [diff] [blame] | 1461 | iput(inode); |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1462 | sbi->skipped_gc_rwsem++; |
Chao Yu | bb06664 | 2017-11-03 10:21:05 +0800 | [diff] [blame] | 1463 | continue; |
| 1464 | } |
| 1465 | |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1466 | start_bidx = f2fs_start_bidx_of_node(nofs, inode) + |
| 1467 | ofs_in_node; |
| 1468 | |
| 1469 | if (f2fs_post_read_required(inode)) { |
| 1470 | int err = ra_data_block(inode, start_bidx); |
| 1471 | |
| 1472 | up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
| 1473 | if (err) { |
| 1474 | iput(inode); |
| 1475 | continue; |
| 1476 | } |
| 1477 | add_gc_inode(gc_list, inode); |
| 1478 | continue; |
| 1479 | } |
| 1480 | |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1481 | data_page = f2fs_get_read_data_page(inode, |
Chao Yu | 6aa58d8 | 2018-08-14 22:37:25 +0800 | [diff] [blame] | 1482 | start_bidx, REQ_RAHEAD, true); |
Chao Yu | b2532c6 | 2018-04-24 10:55:28 +0800 | [diff] [blame] | 1483 | up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
Changman Lee | 31a3268 | 2014-11-27 16:03:08 +0900 | [diff] [blame] | 1484 | if (IS_ERR(data_page)) { |
| 1485 | iput(inode); |
| 1486 | continue; |
| 1487 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1488 | |
| 1489 | f2fs_put_page(data_page, 0); |
Changman Lee | 7dda2af | 2014-11-28 15:49:40 +0000 | [diff] [blame] | 1490 | add_gc_inode(gc_list, inode); |
Changman Lee | 31a3268 | 2014-11-27 16:03:08 +0900 | [diff] [blame] | 1491 | continue; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1492 | } |
Changman Lee | 31a3268 | 2014-11-27 16:03:08 +0900 | [diff] [blame] | 1493 | |
Chao Yu | 7ea984b | 2016-08-27 00:14:31 +0800 | [diff] [blame] | 1494 | /* phase 4 */ |
Changman Lee | 7dda2af | 2014-11-28 15:49:40 +0000 | [diff] [blame] | 1495 | inode = find_gc_inode(gc_list, dni.ino); |
Changman Lee | 31a3268 | 2014-11-27 16:03:08 +0900 | [diff] [blame] | 1496 | if (inode) { |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1497 | struct f2fs_inode_info *fi = F2FS_I(inode); |
| 1498 | bool locked = false; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1499 | int err; |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1500 | |
| 1501 | if (S_ISREG(inode->i_mode)) { |
Chao Yu | b2532c6 | 2018-04-24 10:55:28 +0800 | [diff] [blame] | 1502 | if (!down_write_trylock(&fi->i_gc_rwsem[READ])) |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1503 | continue; |
| 1504 | if (!down_write_trylock( |
Chao Yu | b2532c6 | 2018-04-24 10:55:28 +0800 | [diff] [blame] | 1505 | &fi->i_gc_rwsem[WRITE])) { |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1506 | sbi->skipped_gc_rwsem++; |
Chao Yu | b2532c6 | 2018-04-24 10:55:28 +0800 | [diff] [blame] | 1507 | up_write(&fi->i_gc_rwsem[READ]); |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1508 | continue; |
| 1509 | } |
| 1510 | locked = true; |
Chao Yu | 73ac2f4 | 2017-08-23 18:23:24 +0800 | [diff] [blame] | 1511 | |
| 1512 | /* wait for all inflight aio data */ |
| 1513 | inode_dio_wait(inode); |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1514 | } |
| 1515 | |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1516 | start_bidx = f2fs_start_bidx_of_node(nofs, inode) |
Jaegeuk Kim | c879f90 | 2015-04-24 14:34:30 -0700 | [diff] [blame] | 1517 | + ofs_in_node; |
Eric Biggers | 6dbb179 | 2018-04-18 11:09:48 -0700 | [diff] [blame] | 1518 | if (f2fs_post_read_required(inode)) |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1519 | err = move_data_block(inode, start_bidx, |
| 1520 | gc_type, segno, off); |
Jaegeuk Kim | 4375a33 | 2015-04-23 12:04:33 -0700 | [diff] [blame] | 1521 | else |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1522 | err = move_data_page(inode, start_bidx, gc_type, |
Jaegeuk Kim | d4c759e | 2017-09-05 17:04:35 -0700 | [diff] [blame] | 1523 | segno, off); |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1524 | |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1525 | if (!err && (gc_type == FG_GC || |
| 1526 | f2fs_post_read_required(inode))) |
| 1527 | submitted++; |
| 1528 | |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1529 | if (locked) { |
Chao Yu | b2532c6 | 2018-04-24 10:55:28 +0800 | [diff] [blame] | 1530 | up_write(&fi->i_gc_rwsem[WRITE]); |
| 1531 | up_write(&fi->i_gc_rwsem[READ]); |
Chao Yu | 82e0a5a | 2016-07-13 09:18:29 +0800 | [diff] [blame] | 1532 | } |
| 1533 | |
Changman Lee | e123598 | 2014-12-23 08:37:39 +0900 | [diff] [blame] | 1534 | stat_inc_data_blk_count(sbi, 1, gc_type); |
Changman Lee | 31a3268 | 2014-11-27 16:03:08 +0900 | [diff] [blame] | 1535 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1536 | } |
Jaegeuk Kim | c718379b | 2013-04-24 13:19:56 +0900 | [diff] [blame] | 1537 | |
Chao Yu | 7ea984b | 2016-08-27 00:14:31 +0800 | [diff] [blame] | 1538 | if (++phase < 5) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1539 | goto next_step; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1540 | |
| 1541 | return submitted; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1542 | } |
| 1543 | |
| 1544 | static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, |
Gu Zheng | 8a2d0ac | 2014-10-20 17:45:48 +0800 | [diff] [blame] | 1545 | int gc_type) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1546 | { |
| 1547 | struct sit_info *sit_i = SIT_I(sbi); |
| 1548 | int ret; |
Gu Zheng | 8a2d0ac | 2014-10-20 17:45:48 +0800 | [diff] [blame] | 1549 | |
Chao Yu | 3d26fa6 | 2017-10-30 17:49:53 +0800 | [diff] [blame] | 1550 | down_write(&sit_i->sentry_lock); |
Gu Zheng | 8a2d0ac | 2014-10-20 17:45:48 +0800 | [diff] [blame] | 1551 | ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1552 | NO_CHECK_TYPE, LFS, 0); |
Chao Yu | 3d26fa6 | 2017-10-30 17:49:53 +0800 | [diff] [blame] | 1553 | up_write(&sit_i->sentry_lock); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1554 | return ret; |
| 1555 | } |
| 1556 | |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1557 | static int do_garbage_collect(struct f2fs_sb_info *sbi, |
| 1558 | unsigned int start_segno, |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1559 | struct gc_inode_list *gc_list, int gc_type, |
| 1560 | bool force_migrate) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1561 | { |
| 1562 | struct page *sum_page; |
| 1563 | struct f2fs_summary_block *sum; |
Jaegeuk Kim | c718379b | 2013-04-24 13:19:56 +0900 | [diff] [blame] | 1564 | struct blk_plug plug; |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1565 | unsigned int segno = start_segno; |
| 1566 | unsigned int end_segno = start_segno + sbi->segs_per_sec; |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1567 | int seg_freed = 0, migrated = 0; |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1568 | unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? |
| 1569 | SUM_TYPE_DATA : SUM_TYPE_NODE; |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1570 | int submitted = 0; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1571 | |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1572 | if (__is_large_section(sbi)) |
| 1573 | end_segno = rounddown(end_segno, sbi->segs_per_sec); |
| 1574 | |
Aravind Ramesh | de881df | 2020-07-16 18:26:56 +0530 | [diff] [blame] | 1575 | /* |
| 1576 | * zone-capacity can be less than zone-size on zoned devices, |
| 1577 | * resulting in fewer usable segments in the zone than expected, |
| 1578 | * so calculate the end segno in the zone that can be garbage collected. |
| 1579 | */ |
| 1580 | if (f2fs_sb_has_blkzoned(sbi)) |
| 1581 | end_segno -= sbi->segs_per_sec - |
| 1582 | f2fs_usable_segs_in_sec(sbi, segno); |
| 1583 | |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1584 | sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); |
| 1585 | |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1586 | /* readahead multiple SSA blocks that have contiguous addresses */ |
Chao Yu | 2c70c5e | 2018-10-24 18:37:26 +0800 | [diff] [blame] | 1587 | if (__is_large_section(sbi)) |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1588 | f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1589 | end_segno - segno, META_SSA, true); |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1590 | |
| 1591 | /* reference all summary pages */ |
| 1592 | while (segno < end_segno) { |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1593 | sum_page = f2fs_get_sum_page(sbi, segno++); |
Jaegeuk Kim | edc55aa | 2018-09-17 17:36:06 -0700 | [diff] [blame] | 1594 | if (IS_ERR(sum_page)) { |
| 1595 | int err = PTR_ERR(sum_page); |
| 1596 | |
| 1597 | end_segno = segno - 1; |
| 1598 | for (segno = start_segno; segno < end_segno; segno++) { |
| 1599 | sum_page = find_get_page(META_MAPPING(sbi), |
| 1600 | GET_SUM_BLOCK(sbi, segno)); |
| 1601 | f2fs_put_page(sum_page, 0); |
| 1602 | f2fs_put_page(sum_page, 0); |
| 1603 | } |
| 1604 | return err; |
| 1605 | } |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1606 | unlock_page(sum_page); |
| 1607 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1608 | |
Jaegeuk Kim | c718379b | 2013-04-24 13:19:56 +0900 | [diff] [blame] | 1609 | blk_start_plug(&plug); |
| 1610 | |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1611 | for (segno = start_segno; segno < end_segno; segno++) { |
Jaegeuk Kim | aa98727 | 2016-06-06 18:49:54 -0700 | [diff] [blame] | 1612 | |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1613 | /* find segment summary of victim */ |
| 1614 | sum_page = find_get_page(META_MAPPING(sbi), |
| 1615 | GET_SUM_BLOCK(sbi, segno)); |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1616 | f2fs_put_page(sum_page, 0); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1617 | |
Yunlong Song | d6c66cd | 2018-10-24 16:08:30 +0800 | [diff] [blame] | 1618 | if (get_valid_blocks(sbi, segno, false) == 0) |
| 1619 | goto freed; |
Jaegeuk Kim | dabfbbc | 2020-02-09 13:28:45 -0800 | [diff] [blame] | 1620 | if (gc_type == BG_GC && __is_large_section(sbi) && |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1621 | migrated >= sbi->migration_granularity) |
| 1622 | goto skip; |
Yunlong Song | d6c66cd | 2018-10-24 16:08:30 +0800 | [diff] [blame] | 1623 | if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi))) |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1624 | goto skip; |
Jaegeuk Kim | de0dcc4 | 2016-10-12 13:38:41 -0700 | [diff] [blame] | 1625 | |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1626 | sum = page_address(sum_page); |
Chao Yu | 10d255c | 2018-07-04 21:20:05 +0800 | [diff] [blame] | 1627 | if (type != GET_SUM_TYPE((&sum->footer))) { |
Joe Perches | dcbb4c1 | 2019-06-18 17:48:42 +0800 | [diff] [blame] | 1628 | f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", |
| 1629 | segno, type, GET_SUM_TYPE((&sum->footer))); |
Chao Yu | 10d255c | 2018-07-04 21:20:05 +0800 | [diff] [blame] | 1630 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
Chao Yu | 793ab1c | 2019-04-10 18:45:50 +0800 | [diff] [blame] | 1631 | f2fs_stop_checkpoint(sbi, false); |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1632 | goto skip; |
Chao Yu | 10d255c | 2018-07-04 21:20:05 +0800 | [diff] [blame] | 1633 | } |
Jaegeuk Kim | 9236cac | 2015-05-28 18:19:17 -0700 | [diff] [blame] | 1634 | |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1635 | /* |
| 1636 | * this is to avoid deadlock: |
| 1637 | * - lock_page(sum_page) - f2fs_replace_block |
Chao Yu | 3d26fa6 | 2017-10-30 17:49:53 +0800 | [diff] [blame] | 1638 | * - check_valid_map() - down_write(sentry_lock) |
| 1639 | * - down_read(sentry_lock) - change_curseg() |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1640 | * - lock_page(sum_page) |
| 1641 | */ |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1642 | if (type == SUM_TYPE_NODE) |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1643 | submitted += gc_node_segment(sbi, sum->entries, segno, |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1644 | gc_type); |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1645 | else |
| 1646 | submitted += gc_data_segment(sbi, sum->entries, gc_list, |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1647 | segno, gc_type, |
| 1648 | force_migrate); |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1649 | |
| 1650 | stat_inc_seg_count(sbi, type, gc_type); |
Jaegeuk Kim | 8c7b9ac | 2020-02-09 13:27:09 -0800 | [diff] [blame] | 1651 | migrated++; |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1652 | |
Yunlong Song | d6c66cd | 2018-10-24 16:08:30 +0800 | [diff] [blame] | 1653 | freed: |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1654 | if (gc_type == FG_GC && |
| 1655 | get_valid_blocks(sbi, segno, false) == 0) |
| 1656 | seg_freed++; |
Chao Yu | e3080b0 | 2018-10-24 18:37:27 +0800 | [diff] [blame] | 1657 | |
| 1658 | if (__is_large_section(sbi) && segno + 1 < end_segno) |
| 1659 | sbi->next_victim_seg[gc_type] = segno + 1; |
| 1660 | skip: |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1661 | f2fs_put_page(sum_page, 0); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1662 | } |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1663 | |
Chao Yu | 48018b4 | 2018-09-13 07:40:53 +0800 | [diff] [blame] | 1664 | if (submitted) |
Jaegeuk Kim | b9109b0 | 2017-05-10 11:28:38 -0700 | [diff] [blame] | 1665 | f2fs_submit_merged_write(sbi, |
| 1666 | (type == SUM_TYPE_NODE) ? NODE : DATA); |
Chao Yu | 718e53f | 2016-01-23 16:23:55 +0800 | [diff] [blame] | 1667 | |
Jaegeuk Kim | c718379b | 2013-04-24 13:19:56 +0900 | [diff] [blame] | 1668 | blk_finish_plug(&plug); |
| 1669 | |
Chao Yu | 17d899d | 2016-02-22 18:32:13 +0800 | [diff] [blame] | 1670 | stat_inc_call_count(sbi->stat_info); |
| 1671 | |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1672 | return seg_freed; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1673 | } |
| 1674 | |
Jaegeuk Kim | e066b83 | 2017-04-13 15:17:00 -0700 | [diff] [blame] | 1675 | int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1676 | bool background, bool force, unsigned int segno) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1677 | { |
Chao Yu | d530d4d | 2015-10-05 22:22:44 +0800 | [diff] [blame] | 1678 | int gc_type = sync ? FG_GC : BG_GC; |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1679 | int sec_freed = 0, seg_freed = 0, total_freed = 0; |
| 1680 | int ret = 0; |
Jaegeuk Kim | d5053a34 | 2014-10-30 22:47:03 -0700 | [diff] [blame] | 1681 | struct cp_control cpc; |
Jaegeuk Kim | e066b83 | 2017-04-13 15:17:00 -0700 | [diff] [blame] | 1682 | unsigned int init_segno = segno; |
Changman Lee | 7dda2af | 2014-11-28 15:49:40 +0000 | [diff] [blame] | 1683 | struct gc_inode_list gc_list = { |
| 1684 | .ilist = LIST_HEAD_INIT(gc_list.ilist), |
Matthew Wilcox | f6bb2a2 | 2018-04-10 16:36:52 -0700 | [diff] [blame] | 1685 | .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), |
Changman Lee | 7dda2af | 2014-11-28 15:49:40 +0000 | [diff] [blame] | 1686 | }; |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1687 | unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC]; |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1688 | unsigned long long first_skipped; |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1689 | unsigned int skipped_round = 0, round = 0; |
Jaegeuk Kim | d5053a34 | 2014-10-30 22:47:03 -0700 | [diff] [blame] | 1690 | |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1691 | trace_f2fs_gc_begin(sbi->sb, sync, background, |
| 1692 | get_pages(sbi, F2FS_DIRTY_NODES), |
| 1693 | get_pages(sbi, F2FS_DIRTY_DENTS), |
| 1694 | get_pages(sbi, F2FS_DIRTY_IMETA), |
| 1695 | free_sections(sbi), |
| 1696 | free_segments(sbi), |
| 1697 | reserved_segments(sbi), |
| 1698 | prefree_segments(sbi)); |
| 1699 | |
Jaegeuk Kim | 119ee91 | 2015-01-29 11:45:33 -0800 | [diff] [blame] | 1700 | cpc.reason = __get_cp_reason(sbi); |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1701 | sbi->skipped_gc_rwsem = 0; |
| 1702 | first_skipped = last_skipped; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1703 | gc_more: |
Linus Torvalds | 1751e8a | 2017-11-27 13:05:09 -0800 | [diff] [blame] | 1704 | if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { |
Weichao Guo | e5dbd95 | 2017-05-11 04:28:00 +0800 | [diff] [blame] | 1705 | ret = -EINVAL; |
Jaegeuk Kim | 408e937 | 2013-01-03 17:55:52 +0900 | [diff] [blame] | 1706 | goto stop; |
Weichao Guo | e5dbd95 | 2017-05-11 04:28:00 +0800 | [diff] [blame] | 1707 | } |
Chao Yu | 6d5a149 | 2015-12-24 18:04:56 +0800 | [diff] [blame] | 1708 | if (unlikely(f2fs_cp_error(sbi))) { |
| 1709 | ret = -EIO; |
Jaegeuk Kim | 203681f | 2014-02-05 13:03:57 +0900 | [diff] [blame] | 1710 | goto stop; |
Chao Yu | 6d5a149 | 2015-12-24 18:04:56 +0800 | [diff] [blame] | 1711 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1712 | |
Hou Pengyang | 19f4e68 | 2017-02-25 03:57:38 +0000 | [diff] [blame] | 1713 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { |
Jaegeuk Kim | 6e17bfb | 2016-01-23 22:00:57 +0800 | [diff] [blame] | 1714 | /* |
Hou Pengyang | 19f4e68 | 2017-02-25 03:57:38 +0000 | [diff] [blame] | 1715 | * For example, if there are many prefree_segments below the given |
| 1716 | * threshold, we can free them via a checkpoint. Then, we |
| 1717 | * secure free segments which don't need fggc any more. |
Jaegeuk Kim | 6e17bfb | 2016-01-23 22:00:57 +0800 | [diff] [blame] | 1718 | */ |
Daniel Rosenberg | 4354994 | 2018-08-20 19:21:43 -0700 | [diff] [blame] | 1719 | if (prefree_segments(sbi) && |
| 1720 | !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1721 | ret = f2fs_write_checkpoint(sbi, &cpc); |
Jaegeuk Kim | 8fd5a37 | 2017-04-07 17:25:54 -0700 | [diff] [blame] | 1722 | if (ret) |
| 1723 | goto stop; |
| 1724 | } |
Hou Pengyang | 19f4e68 | 2017-02-25 03:57:38 +0000 | [diff] [blame] | 1725 | if (has_not_enough_free_secs(sbi, 0, 0)) |
| 1726 | gc_type = FG_GC; |
Jaegeuk Kim | d64f804 | 2013-04-08 16:01:00 +0900 | [diff] [blame] | 1727 | } |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1728 | |
Hou Pengyang | 19f4e68 | 2017-02-25 03:57:38 +0000 | [diff] [blame] | 1729 | /* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */ |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1730 | if (gc_type == BG_GC && !background) { |
| 1731 | ret = -EINVAL; |
Hou Pengyang | 19f4e68 | 2017-02-25 03:57:38 +0000 | [diff] [blame] | 1732 | goto stop; |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1733 | } |
Qilong Zhang | 9776750 | 2020-06-28 19:23:03 +0800 | [diff] [blame] | 1734 | ret = __get_victim(sbi, &segno, gc_type); |
| 1735 | if (ret) |
Jaegeuk Kim | 408e937 | 2013-01-03 17:55:52 +0900 | [diff] [blame] | 1736 | goto stop; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1737 | |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1738 | seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force); |
Aravind Ramesh | de881df | 2020-07-16 18:26:56 +0530 | [diff] [blame] | 1739 | if (gc_type == FG_GC && |
| 1740 | seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) |
Chao Yu | 45fe849 | 2015-09-28 17:42:24 +0800 | [diff] [blame] | 1741 | sec_freed++; |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1742 | total_freed += seg_freed; |
Jaegeuk Kim | 4372752 | 2013-02-04 15:11:17 +0900 | [diff] [blame] | 1743 | |
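| | /* track FG_GC rounds that were blocked by atomic files or contended i_gc_rwsem */ |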
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1744 | if (gc_type == FG_GC) { |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1745 | if (sbi->skipped_atomic_files[FG_GC] > last_skipped || |
| 1746 | sbi->skipped_gc_rwsem) |
Chao Yu | 2ef79ec | 2018-05-07 20:28:54 +0800 | [diff] [blame] | 1747 | skipped_round++; |
| 1748 | last_skipped = sbi->skipped_atomic_files[FG_GC]; |
| 1749 | round++; |
| 1750 | } |
| 1751 | |
Sahitya Tummala | 957fa47 | 2019-07-29 10:50:26 +0530 | [diff] [blame] | 1752 | if (gc_type == FG_GC && seg_freed) |
Jaegeuk Kim | 5ec4e49 | 2013-03-31 13:26:03 +0900 | [diff] [blame] | 1753 | sbi->cur_victim_sec = NULL_SEGNO; |
Jaegeuk Kim | 4372752 | 2013-02-04 15:11:17 +0900 | [diff] [blame] | 1754 | |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1755 | if (sync) |
| 1756 | goto stop; |
| 1757 | |
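| | /* |
| | * Still short of free sections: retry unless too many rounds were skipped; |
| | * if atomic-file writes are what blocked progress, drop their in-memory |
| | * pages before retrying. |
| | */ |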
| 1758 | if (has_not_enough_free_secs(sbi, sec_freed, 0)) { |
| 1759 | if (skipped_round <= MAX_SKIP_GC_COUNT || |
| 1760 | skipped_round * 2 < round) { |
Jaegeuk Kim | e066b83 | 2017-04-13 15:17:00 -0700 | [diff] [blame] | 1761 | segno = NULL_SEGNO; |
Chao Yu | d530d4d | 2015-10-05 22:22:44 +0800 | [diff] [blame] | 1762 | goto gc_more; |
Jaegeuk Kim | e066b83 | 2017-04-13 15:17:00 -0700 | [diff] [blame] | 1763 | } |
Jaegeuk Kim | 4372752 | 2013-02-04 15:11:17 +0900 | [diff] [blame] | 1764 | |
Jaegeuk Kim | 6f8d445 | 2018-07-25 12:11:56 +0900 | [diff] [blame] | 1765 | if (first_skipped < last_skipped && |
| 1766 | (last_skipped - first_skipped) > |
| 1767 | sbi->skipped_gc_rwsem) { |
| 1768 | f2fs_drop_inmem_pages_all(sbi, true); |
| 1769 | segno = NULL_SEGNO; |
| 1770 | goto gc_more; |
| 1771 | } |
Daniel Rosenberg | 4354994 | 2018-08-20 19:21:43 -0700 | [diff] [blame] | 1772 | if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1773 | ret = f2fs_write_checkpoint(sbi, &cpc); |
Chao Yu | d530d4d | 2015-10-05 22:22:44 +0800 | [diff] [blame] | 1774 | } |
Jaegeuk Kim | 408e937 | 2013-01-03 17:55:52 +0900 | [diff] [blame] | 1775 | stop: |
Jaegeuk Kim | e066b83 | 2017-04-13 15:17:00 -0700 | [diff] [blame] | 1776 | SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; |
| 1777 | SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno; |
Chao Yu | c56f16d | 2017-08-11 18:00:15 +0800 | [diff] [blame] | 1778 | |
| 1779 | trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed, |
| 1780 | get_pages(sbi, F2FS_DIRTY_NODES), |
| 1781 | get_pages(sbi, F2FS_DIRTY_DENTS), |
| 1782 | get_pages(sbi, F2FS_DIRTY_IMETA), |
| 1783 | free_sections(sbi), |
| 1784 | free_segments(sbi), |
| 1785 | reserved_segments(sbi), |
| 1786 | prefree_segments(sbi)); |
| 1787 | |
Chao Yu | fb24fea | 2020-01-14 19:36:50 +0800 | [diff] [blame] | 1788 | up_write(&sbi->gc_lock); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1789 | |
Changman Lee | 7dda2af | 2014-11-28 15:49:40 +0000 | [diff] [blame] | 1790 | put_gc_inode(&gc_list); |
Chao Yu | d530d4d | 2015-10-05 22:22:44 +0800 | [diff] [blame] | 1791 | |
Jaegeuk Kim | 61f7725 | 2018-09-25 15:25:21 -0700 | [diff] [blame] | 1792 | if (sync && !ret) |
Chao Yu | d530d4d | 2015-10-05 22:22:44 +0800 | [diff] [blame] | 1793 | ret = sec_freed ? 0 : -EAGAIN; |
Jaegeuk Kim | 4372752 | 2013-02-04 15:11:17 +0900 | [diff] [blame] | 1794 | return ret; |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1795 | } |
| 1796 | |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1797 | int __init f2fs_create_garbage_collection_cache(void) |
| 1798 | { |
| 1799 | victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry", |
| 1800 | sizeof(struct victim_entry)); |
| 1801 | if (!victim_entry_slab) |
| 1802 | return -ENOMEM; |
| 1803 | return 0; |
| 1804 | } |
| 1805 | |
| 1806 | void f2fs_destroy_garbage_collection_cache(void) |
| 1807 | { |
| 1808 | kmem_cache_destroy(victim_entry_slab); |
| 1809 | } |
| 1810 | |
| 1811 | static void init_atgc_management(struct f2fs_sb_info *sbi) |
| 1812 | { |
| 1813 | struct atgc_management *am = &sbi->am; |
| 1814 | |
| 1815 | if (test_opt(sbi, ATGC) && |
| 1816 | SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD) |
| 1817 | am->atgc_enabled = true; |
| 1818 | |
| 1819 | am->root = RB_ROOT_CACHED; |
| 1820 | INIT_LIST_HEAD(&am->victim_list); |
| 1821 | am->victim_count = 0; |
| 1822 | |
| 1823 | am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO; |
| 1824 | am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT; |
| 1825 | am->age_weight = DEF_GC_THREAD_AGE_WEIGHT; |
Chao Yu | bb5f20d | 2021-05-11 18:17:34 +0800 | [diff] [blame] | 1826 | am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD; |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1827 | } |
| 1828 | |
Chao Yu | 4d57b86 | 2018-05-30 00:20:41 +0800 | [diff] [blame] | 1829 | void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1830 | { |
| 1831 | DIRTY_I(sbi)->v_ops = &default_v_ops; |
Hou Pengyang | e93b986 | 2017-02-16 12:34:31 +0000 | [diff] [blame] | 1832 | |
Jaegeuk Kim | 1ad71a2 | 2017-12-07 16:25:39 -0800 | [diff] [blame] | 1833 | sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; |
Jaegeuk Kim | d579324 | 2017-04-18 15:03:15 -0700 | [diff] [blame] | 1834 | |
| 1835 | /* serve the warm/cold data area from the slower device */
Damien Le Moal | 0916878 | 2019-03-16 09:13:06 +0900 | [diff] [blame] | 1836 | if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) |
Jaegeuk Kim | d579324 | 2017-04-18 15:03:15 -0700 | [diff] [blame] | 1837 | SIT_I(sbi)->last_victim[ALLOC_NEXT] = |
| 1838 | GET_SEGNO(sbi, FDEV(0).end_blk) + 1; |
Chao Yu | 093749e | 2020-08-04 21:14:49 +0800 | [diff] [blame] | 1839 | |
| 1840 | init_atgc_management(sbi); |
Jaegeuk Kim | 7bc0900 | 2012-11-02 17:13:01 +0900 | [diff] [blame] | 1841 | } |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1842 | |
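/*
 * Resize helper: temporarily drop MAIN_SECS(sbi) by @secs so the trailing
 * @secs sections are fenced off from new allocation, clear any victim hints
 * that point into that range, move the current segments out of it, and
 * foreground-GC every section in it. Unless @gc_only is set, also write a
 * checkpoint and verify the range really became free. MAIN_SECS is restored
 * before returning.
 */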
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1843 | static int free_segment_range(struct f2fs_sb_info *sbi, |
| 1844 | unsigned int secs, bool gc_only) |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1845 | { |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1846 | unsigned int segno, next_inuse, start, end; |
| 1847 | struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; |
| 1848 | int gc_mode, gc_type; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1849 | int err = 0; |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1850 | int type; |
| 1851 | |
| 1852 | /* Force block allocation for GC */ |
| 1853 | MAIN_SECS(sbi) -= secs; |
| 1854 | start = MAIN_SECS(sbi) * sbi->segs_per_sec; |
| 1855 | end = MAIN_SEGS(sbi) - 1; |
| 1856 | |
| 1857 | mutex_lock(&DIRTY_I(sbi)->seglist_lock); |
| 1858 | for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) |
| 1859 | if (SIT_I(sbi)->last_victim[gc_mode] >= start) |
| 1860 | SIT_I(sbi)->last_victim[gc_mode] = 0; |
| 1861 | |
| 1862 | for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) |
| 1863 | if (sbi->next_victim_seg[gc_type] >= start) |
| 1864 | sbi->next_victim_seg[gc_type] = NULL_SEGNO; |
| 1865 | mutex_unlock(&DIRTY_I(sbi)->seglist_lock); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1866 | |
| 1867 | /* Move out cursegs from the target range */ |
Chao Yu | d0b9e42 | 2020-08-04 21:14:45 +0800 | [diff] [blame] | 1868 | for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) |
Chao Yu | 0ef8183 | 2020-06-18 14:36:22 +0800 | [diff] [blame] | 1869 | f2fs_allocate_segment_for_resize(sbi, type, start, end); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1870 | |
| 1871 | /* do GC to move out valid blocks in the range */ |
| 1872 | for (segno = start; segno <= end; segno += sbi->segs_per_sec) { |
| 1873 | struct gc_inode_list gc_list = { |
| 1874 | .ilist = LIST_HEAD_INIT(gc_list.ilist), |
| 1875 | .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), |
| 1876 | }; |
| 1877 | |
Chao Yu | ba25abd | 2021-02-20 17:35:40 +0800 | [diff] [blame] | 1878 | do_garbage_collect(sbi, segno, &gc_list, FG_GC, true); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1879 | put_gc_inode(&gc_list); |
| 1880 | |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1881 | if (!gc_only && get_valid_blocks(sbi, segno, true)) { |
| 1882 | err = -EAGAIN; |
| 1883 | goto out; |
| 1884 | } |
| 1885 | if (fatal_signal_pending(current)) { |
| 1886 | err = -ERESTARTSYS; |
| 1887 | goto out; |
| 1888 | } |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1889 | } |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1890 | if (gc_only) |
| 1891 | goto out; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1892 | |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1893 | err = f2fs_write_checkpoint(sbi, &cpc); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1894 | if (err) |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1895 | goto out; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1896 | |
| 1897 | next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); |
| 1898 | if (next_inuse <= end) { |
Joe Perches | dcbb4c1 | 2019-06-18 17:48:42 +0800 | [diff] [blame] | 1899 | f2fs_err(sbi, "segno %u should be free but still in use!",
| 1900 | next_inuse); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1901 | f2fs_bug_on(sbi, 1); |
| 1902 | } |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1903 | out: |
| 1904 | MAIN_SECS(sbi) += secs; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1905 | return err; |
| 1906 | } |
| 1907 | |
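/*
 * Apply a resize delta of @secs sections to the raw superblock: section,
 * segment and block counts plus, on multi-device setups, the last device's
 * segment count. Serialized by sb_lock.
 */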
| 1908 | static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) |
| 1909 | { |
| 1910 | struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); |
Chao Yu | a4ba5df | 2020-03-03 20:09:25 +0800 | [diff] [blame] | 1911 | int section_count; |
| 1912 | int segment_count; |
| 1913 | int segment_count_main; |
| 1914 | long long block_count; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1915 | int segs = secs * sbi->segs_per_sec; |
| 1916 | |
Chao Yu | a4ba5df | 2020-03-03 20:09:25 +0800 | [diff] [blame] | 1917 | down_write(&sbi->sb_lock); |
| 1918 | |
| 1919 | section_count = le32_to_cpu(raw_sb->section_count); |
| 1920 | segment_count = le32_to_cpu(raw_sb->segment_count); |
| 1921 | segment_count_main = le32_to_cpu(raw_sb->segment_count_main); |
| 1922 | block_count = le64_to_cpu(raw_sb->block_count); |
| 1923 | |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1924 | raw_sb->section_count = cpu_to_le32(section_count + secs); |
| 1925 | raw_sb->segment_count = cpu_to_le32(segment_count + segs); |
| 1926 | raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); |
| 1927 | raw_sb->block_count = cpu_to_le64(block_count + |
| 1928 | (long long)segs * sbi->blocks_per_seg); |
Qiuyang Sun | 46d9ce1 | 2019-09-23 12:21:39 +0800 | [diff] [blame] | 1929 | if (f2fs_is_multi_device(sbi)) { |
| 1930 | int last_dev = sbi->s_ndevs - 1; |
| 1931 | int dev_segs = |
| 1932 | le32_to_cpu(raw_sb->devs[last_dev].total_segments); |
| 1933 | |
| 1934 | raw_sb->devs[last_dev].total_segments = |
| 1935 | cpu_to_le32(dev_segs + segs); |
| 1936 | } |
Chao Yu | a4ba5df | 2020-03-03 20:09:25 +0800 | [diff] [blame] | 1937 | |
| 1938 | up_write(&sbi->sb_lock); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1939 | } |
| 1940 | |
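/*
 * Apply the same delta to the in-memory state: segment manager counters,
 * MAIN_SEGS/MAIN_SECS, free section and segment counts, the checkpoint's
 * user_block_count, and the last device's geometry (including its zone
 * count on zoned block devices).
 */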
| 1941 | static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) |
| 1942 | { |
| 1943 | int segs = secs * sbi->segs_per_sec; |
Qiuyang Sun | 46d9ce1 | 2019-09-23 12:21:39 +0800 | [diff] [blame] | 1944 | long long blks = (long long)segs * sbi->blocks_per_seg; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1945 | long long user_block_count = |
| 1946 | le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); |
| 1947 | |
| 1948 | SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; |
| 1949 | MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1950 | MAIN_SECS(sbi) += secs; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1951 | FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; |
| 1952 | FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; |
Qiuyang Sun | 46d9ce1 | 2019-09-23 12:21:39 +0800 | [diff] [blame] | 1953 | F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); |
| 1954 | |
| 1955 | if (f2fs_is_multi_device(sbi)) { |
| 1956 | int last_dev = sbi->s_ndevs - 1; |
| 1957 | |
| 1958 | FDEV(last_dev).total_segments = |
| 1959 | (int)FDEV(last_dev).total_segments + segs; |
| 1960 | FDEV(last_dev).end_blk = |
| 1961 | (long long)FDEV(last_dev).end_blk + blks; |
| 1962 | #ifdef CONFIG_BLK_DEV_ZONED |
| 1963 | FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz + |
| 1964 | (int)(blks >> sbi->log_blocks_per_blkz); |
| 1965 | #endif |
| 1966 | } |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1967 | } |
| 1968 | |
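/*
 * Online shrink to @block_count blocks (growing is not supported). After
 * validating that the new size is section-aligned, smaller than the current
 * one and confined to the last device, and that the fs neither needs fsck
 * nor has checkpointing disabled, a best-effort GC pass empties the target
 * range; the filesystem is then frozen, the shrunk space is reserved, the
 * range is freed for real, superblock and in-memory metadata are updated
 * and a checkpoint is written. Failures after metadata has been touched are
 * rolled back and SBI_NEED_FSCK is set.
 */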
| 1969 | int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) |
| 1970 | { |
| 1971 | __u64 old_block_count, shrunk_blocks; |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 1972 | struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1973 | unsigned int secs; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1974 | int err = 0; |
| 1975 | __u32 rem; |
| 1976 | |
| 1977 | old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); |
| 1978 | if (block_count > old_block_count) |
| 1979 | return -EINVAL; |
| 1980 | |
Qiuyang Sun | 46d9ce1 | 2019-09-23 12:21:39 +0800 | [diff] [blame] | 1981 | if (f2fs_is_multi_device(sbi)) { |
| 1982 | int last_dev = sbi->s_ndevs - 1; |
| 1983 | __u64 last_segs = FDEV(last_dev).total_segments; |
| 1984 | |
| 1985 | if (block_count + last_segs * sbi->blocks_per_seg <= |
| 1986 | old_block_count) |
| 1987 | return -EINVAL; |
| 1988 | } |
| 1989 | |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 1990 | /* new fs size should align to section size */ |
| 1991 | div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); |
| 1992 | if (rem) |
| 1993 | return -EINVAL; |
| 1994 | |
| 1995 | if (block_count == old_block_count) |
| 1996 | return 0; |
| 1997 | |
| 1998 | if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { |
Joe Perches | dcbb4c1 | 2019-06-18 17:48:42 +0800 | [diff] [blame] | 1999 | f2fs_err(sbi, "Should run fsck to repair first."); |
Chao Yu | 10f966b | 2019-06-20 11:36:14 +0800 | [diff] [blame] | 2000 | return -EFSCORRUPTED; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2001 | } |
| 2002 | |
| 2003 | if (test_opt(sbi, DISABLE_CHECKPOINT)) { |
Joe Perches | dcbb4c1 | 2019-06-18 17:48:42 +0800 | [diff] [blame] | 2004 | f2fs_err(sbi, "Checkpoint should be enabled."); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2005 | return -EINVAL; |
| 2006 | } |
| 2007 | |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2008 | shrunk_blocks = old_block_count - block_count; |
| 2009 | secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2010 | |
| 2011 | /* stop other GC */ |
| 2012 | if (!down_write_trylock(&sbi->gc_lock)) |
| 2013 | return -EAGAIN; |
| 2014 | |
| 2015 | /* stop CP to protect MAIN_SECS in free_segment_range */
| 2016 | f2fs_lock_op(sbi); |
Chao Yu | 841a52a | 2021-02-20 17:35:41 +0800 | [diff] [blame] | 2017 | |
| 2018 | spin_lock(&sbi->stat_lock); |
| 2019 | if (shrunk_blocks + valid_user_blocks(sbi) + |
| 2020 | sbi->current_reserved_blocks + sbi->unusable_block_count + |
| 2021 | F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) |
| 2022 | err = -ENOSPC; |
| 2023 | spin_unlock(&sbi->stat_lock); |
| 2024 | |
| 2025 | if (err) |
| 2026 | goto out_unlock; |
| 2027 | |
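/*
 * Best-effort pass: migrate valid blocks out of the target range without
 * committing any metadata change, so most of the GC work happens before
 * the filesystem is frozen below.
 */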
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2028 | err = free_segment_range(sbi, secs, true); |
Chao Yu | 841a52a | 2021-02-20 17:35:41 +0800 | [diff] [blame] | 2029 | |
| 2030 | out_unlock: |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2031 | f2fs_unlock_op(sbi); |
| 2032 | up_write(&sbi->gc_lock); |
| 2033 | if (err) |
| 2034 | return err; |
| 2035 | |
| 2036 | set_sbi_flag(sbi, SBI_IS_RESIZEFS); |
| 2037 | |
| 2038 | freeze_super(sbi->sb); |
| 2039 | down_write(&sbi->gc_lock); |
Sahitya Tummala | 301e317 | 2020-11-23 10:58:32 +0530 | [diff] [blame] | 2040 | down_write(&sbi->cp_global_sem); |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2041 | |
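/*
 * Re-check the space requirement now that the filesystem is frozen and
 * both gc_lock and cp_global_sem are held; only here is user_block_count
 * actually reduced by the shrunk range.
 */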
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2042 | spin_lock(&sbi->stat_lock); |
| 2043 | if (shrunk_blocks + valid_user_blocks(sbi) + |
| 2044 | sbi->current_reserved_blocks + sbi->unusable_block_count + |
| 2045 | F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) |
| 2046 | err = -ENOSPC; |
| 2047 | else |
| 2048 | sbi->user_block_count -= shrunk_blocks; |
| 2049 | spin_unlock(&sbi->stat_lock); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2050 | if (err) |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2051 | goto out_err; |
| 2052 | |
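/* Second pass: this time the target range must end up completely free. */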
| 2053 | err = free_segment_range(sbi, secs, false); |
| 2054 | if (err) |
| 2055 | goto recover_out; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2056 | |
| 2057 | update_sb_metadata(sbi, -secs); |
| 2058 | |
| 2059 | err = f2fs_commit_super(sbi, false); |
| 2060 | if (err) { |
| 2061 | update_sb_metadata(sbi, secs); |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2062 | goto recover_out; |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2063 | } |
| 2064 | |
| 2065 | update_fs_metadata(sbi, -secs); |
| 2066 | clear_sbi_flag(sbi, SBI_IS_RESIZEFS); |
Sahitya Tummala | 68275682 | 2020-03-03 19:59:25 +0530 | [diff] [blame] | 2067 | set_sbi_flag(sbi, SBI_IS_DIRTY); |
Sahitya Tummala | 68275682 | 2020-03-03 19:59:25 +0530 | [diff] [blame] | 2068 | |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2069 | err = f2fs_write_checkpoint(sbi, &cpc); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2070 | if (err) { |
| 2071 | update_fs_metadata(sbi, secs); |
| 2072 | update_sb_metadata(sbi, secs); |
| 2073 | f2fs_commit_super(sbi, false); |
| 2074 | } |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2075 | recover_out: |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2076 | if (err) { |
| 2077 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
Joe Perches | dcbb4c1 | 2019-06-18 17:48:42 +0800 | [diff] [blame] | 2078 | f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2079 | |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2080 | spin_lock(&sbi->stat_lock); |
| 2081 | sbi->user_block_count += shrunk_blocks; |
| 2082 | spin_unlock(&sbi->stat_lock); |
| 2083 | } |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2084 | out_err: |
Sahitya Tummala | 301e317 | 2020-11-23 10:58:32 +0530 | [diff] [blame] | 2085 | up_write(&sbi->cp_global_sem); |
Jaegeuk Kim | b4b1006 | 2020-03-31 11:43:07 -0700 | [diff] [blame] | 2086 | up_write(&sbi->gc_lock); |
| 2087 | thaw_super(sbi->sb); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2088 | clear_sbi_flag(sbi, SBI_IS_RESIZEFS); |
Qiuyang Sun | 04f0b2e | 2019-06-05 11:33:25 +0800 | [diff] [blame] | 2089 | return err; |
| 2090 | } |