/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg) \
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
#define IS_CURSEC(sbi, secno) \
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
	  sbi->segs_per_sec) || \
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
	  sbi->segs_per_sec) || \
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
	  sbi->segs_per_sec) || \
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
	  sbi->segs_per_sec) || \
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
	  sbi->segs_per_sec) || \
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
	  sbi->segs_per_sec))

#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	(sbi->total_sections)

#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << (sbi->log_blocksize + \
					sbi->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) + \
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg) \
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr) \
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno) \
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno) \
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)

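/*
 * A worked example of the address math above (the geometry numbers are
 * illustrative assumptions, not fixed by the on-disk format): with 512
 * blocks per segment (log_blocks_per_seg = 9), segs_per_sec = 4 and
 * secs_per_zone = 2, the block at offset 5000 from SEG0_BLKADDR(sbi)
 * lands in segment 5000 >> 9 = 9 (GET_SEGNO_FROM_SEG0) at in-segment
 * offset 5000 & 511 = 392 (GET_BLKOFF_FROM_SEG0), which belongs to
 * section 9 / 4 = 2 (GET_SECNO) and zone (9 / 4) / 2 = 1
 * (GET_ZONENO_FROM_SEGNO).
 */
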
#define GET_SUM_BLOCK(sbi, segno) \
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer)		((footer)->entry_type)
#define SET_SUM_TYPE(footer, type)	((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno) \
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno) \
	(segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno) \
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi) \
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr) \
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr) \
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
	(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
#define MAX_BIO_BLOCKS(sbi) \
	((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
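/*
 * Sector/block conversion sketch (assuming the usual 4KB f2fs block on
 * a 512-byte-sector device, i.e. F2FS_LOG_SECTORS_PER_BLOCK == 3):
 * SECTOR_FROM_BLOCK(16) == 128 and SECTOR_TO_BLOCK(128) == 16. The
 * sector_t cast matters because block addresses are 32-bit; shifting
 * left without widening first could overflow on large volumes.
 */
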

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job issued from the background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char *discard_map;
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * This value is set in a page's private data to indicate that the page
 * has been written atomically and sits on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)

#define IS_ATOMIC_WRITTEN_PAGE(page) \
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when commit fails */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty types matches CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}

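/*
 * Note on the on-disk encoding above: f2fs_sit_entry.vblocks packs the
 * segment type into the bits above SIT_VBLOCKS_SHIFT and the valid block
 * count into the low bits, which is what GET_SIT_VBLOCKS()/GET_SIT_TYPE()
 * unpack in seg_info_from_raw_sit(). For instance, assuming
 * SIT_VBLOCKS_SHIFT == 10, a CURSEG_WARM_DATA (type 1) segment with 300
 * valid blocks is stored as (1 << 10) | 300 == 1324. Writing the raw
 * entry also refreshes ckpt_valid_map/ckpt_valid_blocks, since the entry
 * being flushed becomes the state recorded by the next checkpoint.
 */
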
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	return free_sections(sbi) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi) + 1);
}

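/*
 * Reading the SSR trigger above: switch to slack-space recycling once
 * the free sections can barely absorb what is already dirty in memory,
 * i.e. the sections' worth of dirty node pages plus twice the dirty
 * dentry sections (a conservative weighting) on top of the reserved
 * sections. For example, with reserved_sections(sbi) == 10,
 * node_secs == 2 and dent_secs == 1, need_SSR() turns true once
 * free_sections(sbi) falls to 15 or below.
 */
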
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy.
 * Users can control the policy through sysfs entries.
 * The policies and their triggering conditions are as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
};

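/*
 * The enum values above are bit positions in SM_I(sbi)->ipu_policy,
 * which need_inplace_update() below tests one bit at a time, so policies
 * can be combined. As an illustration, a mask enabling in-place updates
 * for both SSR mode and fsync-heavy workloads would be
 * (0x1 << F2FS_IPU_SSR) | (0x1 << F2FS_IPU_FSYNC) == 0x12; clearing
 * every bit disables IPU, matching the F2FS_IPU_DISABLE default
 * described above.
 */
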
static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
		return true;

	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
					|| blk_addr >= MAX_BLKADDR(sbi));
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
#endif
	/* check segment usage, and check boundary of a given segment number */
	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1);
}

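/*
 * How the CONFIG_F2FS_CHECK_FS loop above works: it walks valid_map as
 * alternating runs of set and clear bits, using find_next_zero_bit_le()
 * to measure each valid run and find_next_bit_le() to skip each invalid
 * one, accumulating only the valid-run lengths. The total must equal the
 * block count recorded in the raw SIT entry; otherwise the on-disk entry
 * is self-inconsistent and BUG_ON() fires.
 */
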
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

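/*
 * The SIT area holds two copies of every SIT block, laid out sit_blocks
 * apart. current_sit_addr() returns the live copy selected by
 * sit_bitmap, next_sit_addr() mirrors an address into the other copy,
 * and set_to_next_sit() below flips the bitmap bit, so an update is
 * written to the inactive copy and only becomes live at the next
 * checkpoint, never overwriting the last checkpointed version in place.
 */
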
static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

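/*
 * get_mtime() returns time in cumulative mounted seconds: elapsed_time
 * carries the total accumulated over previous mounts, and the
 * (CURRENT_TIME_SEC.tv_sec - mounted_time) term adds the current
 * session, so segment mtime values and the min_mtime/max_mtime bounds
 * in sit_info stay comparable across remounts. The cost-benefit GC
 * policy consumes these values as a measure of segment age.
 */
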
static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	return SECTOR_TO_BLOCK(queue_max_sectors(q));
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 3 for three types of nodes, and
 * max_bio_blocks for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(sbi);
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(sbi);

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}
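
/*
 * Worked example of the helper above (the device-dependent number is
 * illustrative): for NODE pages on a device where max_hw_blocks(sbi)
 * returns 256, desired == 768; a WB_SYNC_NONE writeback that came in
 * with wbc->nr_to_write == 100 is bumped to 768, and the function
 * returns 668 so the caller knows how many extra pages it volunteered
 * to write. Synchronous writeback is left untouched and returns 0.
 */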