/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>

/*
 * Maximum page size we support is 64K, minimum sector size is 4K, so a u16
 * bitmap is sufficient. Regular bitmap_* is not used due to size reasons.
 */
#define BTRFS_SUBPAGE_BITMAP_SIZE	16
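
/*
 * Worked example of the sizing above (illustrative only): with a 64K page
 * and a 4K sector size, one page covers 64K / 4K = 16 sectors, so one bit
 * per sector fits exactly into a u16.  The bit for a given file offset is
 * conceptually ((offset - page_offset(page)) >> sectorsize_bits).
 */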

/*
 * Structure to trace status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_subpage {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	u16 uptodate_bitmap;
	u16 error_bitmap;
	u16 dirty_bitmap;
	u16 writeback_bitmap;
	/*
	 * Both data and metadata need to track how many readers the page has.
	 * Data relies on @readers to unlock the page when the last reader
	 * finishes, while metadata doesn't need the page unlock, but it still
	 * needs to prevent page::private from being cleared before the last
	 * end_page_read().
	 */
	atomic_t readers;
	union {
		/*
		 * Structures only used by metadata
		 *
		 * @eb_refs should only be operated under private_lock, as it
		 * manages whether the subpage can be detached.
		 */
		atomic_t eb_refs;
		/* Structures only used by data */
		struct {
			atomic_t writers;

			/* Track pending ordered extents in this sector */
			u16 ordered_bitmap;
		};
	};
};

enum btrfs_subpage_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
		struct page *page, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
		struct page *page);
/* Allocate additional data where page represents more than one sector */
int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
		struct btrfs_subpage **ret,
		enum btrfs_subpage_type type);
void btrfs_free_subpage(struct btrfs_subpage *subpage);

void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
		struct page *page);
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
		struct page *page);

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
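
/*
 * Illustrative pairing (an assumption based on the @readers comment above,
 * not a verbatim call site): a data read covering [start, start + len) bumps
 * the reader count before submitting the I/O and drops it when the I/O ends:
 *
 *	btrfs_subpage_start_reader(fs_info, page, start, len);
 *	... submit and complete the read ...
 *	btrfs_subpage_end_reader(fs_info, page, start, len);
 *
 * For data, the call that drops the last reader is what unlocks the page.
 */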

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
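
/*
 * Illustrative pairing (an assumption, mirroring the reader helpers): a
 * writeback path covering [start, start + len) would typically do
 *
 *	btrfs_page_start_writer_lock(fs_info, page, start, len);
 *	... dirty or write back the range ...
 *	btrfs_page_end_writer_lock(fs_info, page, start, len);
 *
 * while btrfs_subpage_start_writer()/btrfs_subpage_end_and_test_writer() are
 * the raw counters, the latter presumably returning true once the last
 * writer of the page has finished.
 */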

/*
 * Template for subpage related operations.
 *
 * btrfs_subpage_*() are for call sites where the page has a subpage attached
 * and the range is ensured to be inside the page.
 *
 * btrfs_page_*() are for call sites where the page can be either a subpage
 * page or a regular page. The functions handle both cases, but the range
 * still needs to be inside the page.
 *
 * btrfs_page_clamp_*() are similar to btrfs_page_*(), except the range doesn't
 * need to be inside the page. Those functions will truncate the range
 * automatically.
 *
 * A short usage sketch follows the declarations below.
 */
#define DECLARE_BTRFS_SUBPAGE_OPS(name)					\
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
		struct page *page, u64 start, u32 len);			\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);

DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(error);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
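
/*
 * Usage sketch for the three families declared above (illustrative only):
 * a caller that cannot guarantee [start, start + len) fits inside the page
 * would use the clamp variant, e.g.
 *
 *	btrfs_page_clamp_set_uptodate(fs_info, page, start, len);
 *
 * while a caller that has already trimmed the range to the page can use
 * btrfs_page_set_uptodate(), or btrfs_subpage_set_uptodate() when it knows
 * the page has a subpage structure attached.
 */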

bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);

#endif