Gao Xiang | 29b24f6 | 2019-07-31 23:57:31 +0800 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 3 | * Copyright (C) 2018 HUAWEI, Inc. |
Alexander A. Klimov | 592e7cd | 2020-07-13 15:09:44 +0200 | [diff] [blame] | 4 | * https://www.huawei.com/ |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 5 | */ |
Gao Xiang | 57b78c9 | 2019-07-31 23:57:32 +0800 | [diff] [blame] | 6 | #ifndef __EROFS_FS_ZDATA_H |
| 7 | #define __EROFS_FS_ZDATA_H |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 8 | |
| 9 | #include "internal.h" |
Gao Xiang | 57b78c9 | 2019-07-31 23:57:32 +0800 | [diff] [blame] | 10 | #include "zpvec.h" |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 11 | |
/* upper bound of pages per physical cluster (Z_EROFS_PCLUSTER_MAX_SIZE defined elsewhere) */
#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
/* number of pagevec slots embedded inline in struct z_erofs_collection */
#define Z_EROFS_NR_INLINE_PAGEVECS	3
| 14 | |
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated in atomic for parallelized code.
 */
struct z_erofs_collection {
	/* presumably the "pageset lock" referred to above; guards 'L' fields */
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};
| 45 | |
/* bit 0 of pcluster->length: set when the full decompressed length is known */
#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
/* number of low bits in ->length reserved for the flag above */
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 54 | |
/*
 * A physical cluster: one unit of compressed data plus its bookkeeping.
 * Embeds the workgroup object used for lookup/refcounting and the primary
 * collection holding the decompression pagevec.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of decompressed length and if full length or not */
	unsigned int length;

	/* I: physical cluster size in pages */
	unsigned short pclusterpages;

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/*
	 * A: compressed pages (can be cached or inplaced pages)
	 * Flexible array member; presumably allocated with 'pclusterpages'
	 * trailing entries — confirm at allocation site.
	 */
	struct page *compressed_pages[];
};
| 74 | |
/* accessor for the collection embedded at the head of a pcluster */
#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted io (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted io */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

/* end-of-chain marker */
#define Z_EROFS_PCLUSTER_NIL            (NULL)
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 85 | |
/* per-I/O decompression queue: a chain of pclusters awaiting decompression */
struct z_erofs_decompressqueue {
	struct super_block *sb;
	/* number of bios still in flight for this queue (name suggests; verify at submit path) */
	atomic_t pending_bios;
	/* head of the pcluster chain (terminated by the TAIL/NIL markers above) */
	z_erofs_next_pcluster_t head;

	/* completion mechanism: either synchronous waiters or a deferred work item */
	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};
| 96 | |
/*
 * An "online page" packs two values into one atomic word stored in
 * page_private(): a small reference count in the low bits and a
 * sub-index in the remaining high bits.
 */
#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): # to unlock the page
 * sub-index: 0 - for partial page, >= 1 full page sub-index
 */
typedef atomic_t z_erofs_onlinepage_t;
| 106 | |
/*
 * type punning: view the unsigned long at &page_private(page) as an
 * atomic_t so the packed count/index word can be updated atomically.
 */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};
| 112 | |
Pratik Shinde | e82a9a1 | 2019-07-15 17:51:27 +0530 | [diff] [blame] | 113 | static inline unsigned int z_erofs_onlinepage_index(struct page *page) |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 114 | { |
| 115 | union z_erofs_onlinepage_converter u; |
| 116 | |
Hariprasad Kelam | 800c16c | 2019-06-08 15:19:18 +0530 | [diff] [blame] | 117 | DBG_BUGON(!PagePrivate(page)); |
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 118 | u.v = &page_private(page); |
| 119 | |
| 120 | return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; |
| 121 | } |
| 122 | |
/*
 * Initialize 'page' as an online page: store an initial reference count
 * of 1 (index 0) in page_private() and mark the page as having private
 * data.
 */
static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
		/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	/* ensure the private value is visible before PagePrivate is observed */
	smp_wmb();
	SetPagePrivate(page);
}
| 135 | |
/*
 * Lock-free update of an online page's packed word: record the sub-index
 * 'index' and, if 'down' is true, bump the low-bits reference count by one.
 * Retries with cmpxchg until the update lands without a concurrent change.
 */
static inline void z_erofs_onlinepage_fixup(struct page *page,
					uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		/* an index is already set; only the count may still change */
		if (!index)
			return;

		/* a second, different index would be a logic error */
		DBG_BUGON(orig_index != index);
	}

	/* repack: new index in the high bits, (count + down) in the low bits */
	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	/* raced with a concurrent update — reread and retry */
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}
| 157 | |
/*
 * Drop one reference on an online page. When the low-bits count reaches
 * zero, tear down the private state, mark the page uptodate (unless an
 * error was recorded) and unlock it.
 */
static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		/* last reference: detach private data before unlocking */
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	/*
	 * NOTE(review): debug-only reread of the word; after the branch above
	 * it reads the cleared (zero) private value — confirm intentional.
	 */
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
| 176 | |
/* on-stack page-pointer array size for vmap, capped by stack budget and 96 */
#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
/* size of the fallback global page-pointer array for vmap */
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
Gao Xiang | 3883a79 | 2018-07-26 20:22:06 +0800 | [diff] [blame] | 180 | |
Gao Xiang | 0d40d6e3 | 2018-07-26 20:22:02 +0800 | [diff] [blame] | 181 | #endif |