/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)

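/*
 * Worked example (illustrative, not part of the original header): with
 * 4KB blocks a NAT block holds NAT_ENTRY_PER_BLOCK (455) entries, so
 * for nid 1000:
 *
 *	START_NID(1000)       = (1000 / 455) * 455 = 910
 *	NAT_BLOCK_OFFSET(910) = 910 / 455          = 2
 *
 * i.e. nids 910..1364 are all served by NAT block #2.
 */
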
/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	4

#define DEF_RA_NID_PAGES	4	/* # of nid pages to read ahead */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB RAM) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% of max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD	100000

/* vector size for gang look-up from the nat cache, which is a radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		(nat->ni.nid)
#define nat_set_nid(nat, n)		(nat->ni.nid = n)
#define nat_get_blkaddr(nat)		(nat->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		(nat->ni.blk_addr = b)
#define nat_get_ino(nat)		(nat->ni.ino)
#define nat_set_ino(nat, i)		(nat->ni.ino = i)
#define nat_get_version(nat)		(nat->ni.version)
#define nat_set_version(nat, v)		(nat->ni.version = v)

#define inc_node_version(version)	(++version)

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;
	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;
	return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

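/*
 * Illustration (added for clarity): ni.flag is a one-byte bitmap
 * indexed by the enum above, so after nat_reset_flag():
 *
 *	bit 0 (IS_CHECKPOINTED)   = 1
 *	bit 1 (HAS_FSYNCED_INODE) = 0
 *	bit 2 (HAS_LAST_FSYNC)    = 1
 *	bit 3 (IS_DIRTY)          = left unchanged
 */
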
static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

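/*
 * Note (added for clarity): f2fs_nat_entry is the on-disk,
 * little-endian form and node_info the in-memory, CPU-endian form;
 * the two helpers above are exact inverses of each other, so a round
 * trip through both leaves an entry unchanged.
 */
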
static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}

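/*
 * Worked example (illustrative): with max_nid = 1,000,000 and the
 * default dirty_nats_ratio of 10, excess_dirty_nats() returns true
 * once 100,000 nat entries are dirty, which is used to prompt a
 * checkpoint that flushes them back to the NAT area.
 */
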
enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

/*
 * For free nid management
 */
enum nid_state {
	NID_NEW,	/* newly added to free nid list */
	NID_ALLOC	/* it is allocated */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->free_nid_list_lock);
	if (nm_i->fcnt <= 0) {
		spin_unlock(&nm_i->free_nid_list_lock);
		return;
	}
	fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->free_nid_list_lock);
}

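/*
 * Usage sketch (illustrative, not part of the original header):
 * callers pre-initialize *nid, since next_free_nid() leaves it
 * untouched when the free list is empty, e.g. when a checkpoint
 * records the next free nid:
 *
 *	nid_t last_nid = 0;
 *
 *	next_free_nid(sbi, &last_nid);
 *	ckpt->next_free_nid = cpu_to_le32(last_nid);
 */
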
/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(seg_off << sbi->log_blocks_per_seg << 1) +
		(block_off & (sbi->blocks_per_seg - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}

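/*
 * Worked example (illustrative, assuming 4KB blocks and 512-block
 * segments, i.e. log_blocks_per_seg = 9): the NAT area keeps two
 * copies of every NAT block in a pair of adjacent segments, and the
 * nat_bitmap bit picks the currently valid copy. For block_off = 700:
 *
 *	seg_off    = 700 >> 9 = 1
 *	block_addr = nat_blkaddr + (1 << 9 << 1) + (700 & 511)
 *	           = nat_blkaddr + 1024 + 188
 *
 * and if bit 700 of nat_bitmap is set, the valid copy sits one
 * segment (512 blocks) further on.
 */
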
static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	if ((block_addr >> sbi->log_blocks_per_seg) % 2)
		block_addr -= sbi->blocks_per_seg;
	else
		block_addr += sbi->blocks_per_seg;

	return block_addr + nm_i->nat_blkaddr;
}

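/*
 * In other words (added for clarity): next_nat_addr() flips to the
 * other copy of the same NAT block, one segment forward from an
 * even-numbered segment and one segment back from an odd-numbered
 * one, so a set_to_next_nat() + writeback pair never overwrites the
 * copy that the last checkpoint still relies on.
 */
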
static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* preserve old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

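/*
 * Footer flag layout (added for clarity; bit positions come from the
 * COLD/FSYNC/DENT/OFFSET_BIT_SHIFT definitions elsewhere in f2fs):
 * the low bits carry per-node marks (cold, fsync, dentry) and the
 * bits from OFFSET_BIT_SHIFT upward carry the node offset, which is
 * why fill_node_footer() masks old_flag with OFFSET_BIT_MASK to keep
 * the marks while rewriting the offset.
 */
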
static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);

	rn->footer.cp_ver = ckpt->checkpoint_ver;
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline unsigned long long cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 *  Inode block (0)
 *    |- direct node (1)
 *    |- direct node (2)
 *    |- indirect node (3)
 *    |            `- direct node (4 => 4 + N - 1)
 *    |- indirect node (4 + N)
 *    |            `- direct node (5 + N => 5 + 2N - 1)
 *    `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
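/*
 * Worked numbers (illustrative, assuming 4KB blocks where
 * NIDS_PER_BLOCK is 1018): the only non-dnode offsets are the
 * indirect nodes at 3 and 4 + N = 1022, the double indirect node at
 * 5 + 2N = 2041, and the indirect nodes hanging off it at
 * (6 + 2N) + x(N + 1) = 2042, 3061, 4080, ...; every other offset is
 * a direct node (dnode) holding data block addresses, which is
 * exactly the test IS_DNODE() below performs.
 */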
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return false;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}

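/*
 * Usage note (added for clarity): the 'i' argument tells whether 'p'
 * is an inode page, whose five node pointers live in i.i_nid[] and
 * are indexed from NODE_DIR1_BLOCK, or an (in)direct node page, which
 * is a flat array of nids. For example, fetching an inode's first
 * direct node id:
 *
 *	nid_t dn_nid = get_nid(inode_page, NODE_DIR1_BLOCK, true);
 */
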
/*
 * Coldness identification:
 * - Mark cold files in f2fs_inode_info
 * - Mark cold node blocks in their node footer
 * - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
	return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
	ClearPageChecked(page);
}

static inline void set_cold_node(struct inode *inode, struct page *page)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (S_ISDIR(inode->i_mode))
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
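
/*
 * Usage sketch (illustrative, not part of the original header): the
 * fsync path marks the written dnodes so that roll-forward recovery
 * can tell which nodes to replay, along the lines of:
 *
 *	set_fsync_mark(page, 1);
 *	set_dentry_mark(page, need_dentry_mark(sbi, ino));
 */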