// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
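/*
 * One reading of the minimum layout behind F2FS_MIN_SEGMENTS (illustrative
 * breakdown, not a definition used elsewhere): segment 0 for the superblock
 * area, two segments each for the CP, SIT and NAT areas, one SSA segment and
 * one main-area segment, i.e. 1 + 2 * 3 + 1 + 1 = 9.
 */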

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec))	\

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : 				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : 				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count : 				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))
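/*
 * Example with the common geometry (illustrative numbers only): with 4KB
 * blocks (log_blocksize == 12) and 512 blocks per segment
 * (log_blocks_per_seg == 9), SEGMENT_SIZE() evaluates to 1ULL << 21 == 2MB.
 */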

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	(GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((!__is_valid_data_blkaddr(blk_addr)) ?			\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)					\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)				\
	((segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)				\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)				\
	((secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)				\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
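/*
 * Worked example (illustrative geometry): with 512 blocks per segment
 * (log_blocks_per_seg == 9) and one segment per section, a block at
 * SEG0_BLKADDR(sbi) + 1030 yields GET_SEGNO_FROM_SEG0() == 2 (1030 >> 9)
 * and GET_BLKOFF_FROM_SEG0() == 6 (1030 & 511), and that relative segment
 * maps to section GET_SEC_FROM_SEG(sbi, 2) == 2.
 */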

#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)					\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)		\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)			\
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)			\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
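/*
 * Example (illustrative only): with 4KB filesystem blocks and 512-byte
 * sectors, F2FS_LOG_SECTORS_PER_BLOCK is 3, so SECTOR_FROM_BLOCK(10) == 80
 * and SECTOR_TO_BLOCK(80) == 10.
 */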

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};
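/*
 * For reference (a sketch of the idea, not a definition used by this header):
 * the cost-benefit policy scores a candidate segment roughly as
 * (100 - u) * age / (100 + u), where u is the segment's utilization in
 * percent and age is its normalized modification time, while the greedy
 * policy simply favors the segment with the fewest valid blocks.  The
 * authoritative implementation lives in fs/f2fs/gc.c (get_cb_cost()).
 */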

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means on-demand cleaning job in background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

#define MAX_SKIP_GC_COUNT			16

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when fail to commit */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *bitmap;			/* all bitmaps pointer */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
					int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	if (type == CURSEG_COLD_DATA_PINNED)
		type = CURSEG_COLD_DATA;
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno)
{
	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = (struct f2fs_sit_block *)page_address(page);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}

static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
{
	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int segno, left_blocks;
	int i;

	/* check current node segment */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data segment */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
			has_curseg_enough_space(sbi))
		return false;
	return (free_sections(sbi) + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		reserved_sections(sbi) + needed);
}
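/*
 * Worked example of the check above (illustrative numbers only): with 2 dirty
 * node sections, 1 dirty dentry section, 1 dirty inode-meta section,
 * reserved_sections() == 6 and freed == needed == 0, the caller is told
 * there is enough free space only when
 * free_sections(sbi) > 2 + 2 * 1 + 1 + 6 == 11.
 */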

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy
 * and update data in place (IPU).  Users can control the policy through
 * sysfs entries.  The policies and their triggering conditions are:
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages exceeds min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
#define DEF_MIN_IPU_UTIL		70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
};
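/*
 * Illustrative sketch only (not used by f2fs itself): the ipu_policy value
 * kept in struct f2fs_sm_info and exposed via sysfs is treated as a bitmap
 * over the enum above, so checking whether a given policy is enabled boils
 * down to a bit test like the hypothetical helper below.
 */
static inline bool f2fs_example_ipu_policy_enabled(unsigned int ipu_policy,
						int policy)
{
	return ipu_policy & (1 << policy);
}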

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write at once, so that we can
 * submit a big bio without interfering other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_PAGES;
	else
		return 0;
}
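/*
 * Worked example (typical geometry, illustrative only): with 512 blocks per
 * segment and BIO_MAX_PAGES == 256, the skip thresholds above are 512 dirty
 * pages (2MB) for DATA, 4096 pages (16MB) for NODE and 2048 pages (8MB) for
 * META, assuming 4KB pages.
 */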

/*
 * When writing pages, it is better to align nr_to_write with the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_PAGES;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}