Gao Xiang | 29b24f6 | 2019-07-31 23:57:31 +0800 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 3 | * Copyright (C) 2018 HUAWEI, Inc. |
Alexander A. Klimov | 592e7cd | 2020-07-13 15:09:44 +0200 | [diff] [blame] | 4 | * https://www.huawei.com/ |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 5 | */ |
Gao Xiang | 57b78c9 | 2019-07-31 23:57:32 +0800 | [diff] [blame] | 6 | #ifndef __EROFS_FS_ZDATA_H |
| 7 | #define __EROFS_FS_ZDATA_H |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 8 | |
| 9 | #include "internal.h" |
Gao Xiang | 57b78c9 | 2019-07-31 23:57:32 +0800 | [diff] [blame] | 10 | #include "zpvec.h" |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 11 | |
/* upper bound of compressed pages per pcluster, from the on-disk size limit */
#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
/* number of tagged page pointers kept inline in struct z_erofs_collection */
#define Z_EROFS_NR_INLINE_PAGEVECS	3
| 14 | |
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pageset lock;
 *
 * A: Field should be accessed / updated atomically by parallelized code.
 */
struct z_erofs_collection {
	/* the pageset lock ("L" above): serializes pagevec[] updates */
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};
| 45 | |
/* pcluster ->length bit 0: set once the full decompressed length is known */
#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
/* shift to extract the decompressed length part of ->length */
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 54 | |
/* runtime descriptor of one physical (compressed) cluster */
struct z_erofs_pcluster {
	/* managed-workgroup node; obj.index doubles as the lookup key */
	struct erofs_workgroup obj;
	/* the collection embedded in the pcluster itself */
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of decompressed length and if full length or not */
	unsigned int length;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	/* which member is valid depends on z_erofs_is_inline_pcluster() */
	union {
		/* I: physical cluster size in pages */
		unsigned short pclusterpages;

		/* I: tailpacking inline compressed size */
		unsigned short tailpacking_size;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or inplaced pages) */
	struct page *compressed_pages[];
};
| 82 | |
/* the primary collection is embedded directly in its pcluster */
#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
| 84 | |
/*
 * Sentinel values for z_erofs_next_pcluster_t chains; these magic
 * pointers are chosen to avoid the valid 32-bit kernel addresses.
 */

/* the chained workgroup hasn't submitted I/O (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

/* no pcluster: the end of (or an empty) chain */
#define Z_EROFS_PCLUSTER_NIL            (NULL)
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 93 | |
/* per-submission queue of chained pclusters awaiting decompression */
struct z_erofs_decompressqueue {
	/* superblock of the filesystem this queue belongs to */
	struct super_block *sb;
	/* number of bios still in flight for this queue */
	atomic_t pending_bios;
	/* head of the pcluster chain (see Z_EROFS_PCLUSTER_TAIL*) */
	z_erofs_next_pcluster_t head;

	/* completion mechanism: waited on synchronously or run as a work item */
	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};
| 104 | |
Yue Hu | cecf864 | 2021-12-29 07:29:19 +0800 | [diff] [blame] | 105 | static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl) |
| 106 | { |
| 107 | return !pcl->obj.index; |
| 108 | } |
| 109 | |
| 110 | static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) |
| 111 | { |
| 112 | if (z_erofs_is_inline_pcluster(pcl)) |
| 113 | return 1; |
| 114 | return pcl->pclusterpages; |
| 115 | } |
| 116 | |
/* low bits of page_private(): count of outstanding unlock waiters */
#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
/* remaining bits hold the page sub-index */
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): # to unlock the page
 * sub-index: 0 - for partial page, >= 1 full page sub-index
 */
typedef atomic_t z_erofs_onlinepage_t;

/* type punning between the atomic view and the raw page_private() word */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};
| 132 | |
Pratik Shinde | e82a9a1 | 2019-07-15 17:51:27 +0530 | [diff] [blame] | 133 | static inline unsigned int z_erofs_onlinepage_index(struct page *page) |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 134 | { |
| 135 | union z_erofs_onlinepage_converter u; |
| 136 | |
Hariprasad Kelam | 800c16c | 2019-06-08 15:19:18 +0530 | [diff] [blame] | 137 | DBG_BUGON(!PagePrivate(page)); |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 138 | u.v = &page_private(page); |
| 139 | |
| 140 | return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; |
| 141 | } |
| 142 | |
| 143 | static inline void z_erofs_onlinepage_init(struct page *page) |
| 144 | { |
| 145 | union { |
| 146 | z_erofs_onlinepage_t o; |
| 147 | unsigned long v; |
| 148 | /* keep from being unlocked in advance */ |
| 149 | } u = { .o = ATOMIC_INIT(1) }; |
| 150 | |
| 151 | set_page_private(page, u.v); |
| 152 | smp_wmb(); |
| 153 | SetPagePrivate(page); |
| 154 | } |
| 155 | |
| 156 | static inline void z_erofs_onlinepage_fixup(struct page *page, |
| 157 | uintptr_t index, bool down) |
| 158 | { |
Gao Xiang | 3c597282 | 2020-06-19 07:43:49 +0800 | [diff] [blame] | 159 | union z_erofs_onlinepage_converter u = { .v = &page_private(page) }; |
| 160 | int orig, orig_index, val; |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 161 | |
Gao Xiang | 3c597282 | 2020-06-19 07:43:49 +0800 | [diff] [blame] | 162 | repeat: |
| 163 | orig = atomic_read(u.o); |
| 164 | orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; |
| 165 | if (orig_index) { |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 166 | if (!index) |
| 167 | return; |
| 168 | |
Gao Xiang | 3c597282 | 2020-06-19 07:43:49 +0800 | [diff] [blame] | 169 | DBG_BUGON(orig_index != index); |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 170 | } |
| 171 | |
Gao Xiang | 3c597282 | 2020-06-19 07:43:49 +0800 | [diff] [blame] | 172 | val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | |
| 173 | ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); |
| 174 | if (atomic_cmpxchg(u.o, orig, val) != orig) |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 175 | goto repeat; |
| 176 | } |
| 177 | |
| 178 | static inline void z_erofs_onlinepage_endio(struct page *page) |
| 179 | { |
| 180 | union z_erofs_onlinepage_converter u; |
Pratik Shinde | e82a9a1 | 2019-07-15 17:51:27 +0530 | [diff] [blame] | 181 | unsigned int v; |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 182 | |
Hariprasad Kelam | 800c16c | 2019-06-08 15:19:18 +0530 | [diff] [blame] | 183 | DBG_BUGON(!PagePrivate(page)); |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 184 | u.v = &page_private(page); |
| 185 | |
| 186 | v = atomic_dec_return(u.o); |
| 187 | if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) { |
Gao Xiang | 6aaa7b0 | 2020-12-08 17:58:32 +0800 | [diff] [blame] | 188 | set_page_private(page, 0); |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 189 | ClearPagePrivate(page); |
| 190 | if (!PageError(page)) |
| 191 | SetPageUptodate(page); |
| 192 | unlock_page(page); |
| 193 | } |
Gao Xiang | 4f761fa | 2019-09-04 10:09:09 +0800 | [diff] [blame] | 194 | erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o)); |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 195 | } |
| 196 | |
/* on-stack page array size for vmap, bounded by available thread stack */
#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
/* size of the preallocated global fallback page array */
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 200 | |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 201 | #endif |