/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS	3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pageset lock;
 *
 * A: Field should be accessed / updated atomically for parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
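
/*
 * Illustrative encoding sketch (an assumption inferred from the two macros
 * above, not a comment taken from this header): the "length" field of
 * struct z_erofs_pcluster below stores the known decompressed byte count
 * shifted left by Z_EROFS_PCLUSTER_LENGTH_BIT, with bit 0 set once the
 * full length is determined.  For example, a fully-mapped 4096-byte
 * pcluster would be recorded as
 *
 *	(4096 << Z_EROFS_PCLUSTER_LENGTH_BIT) | Z_EROFS_PCLUSTER_FULL_LENGTH
 *
 * which evaluates to 0x2001.
 */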

/*
 * let's leave a type here in case another tagged pointer is
 * introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of decompressed length and whether it is the full length */
	unsigned int length;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	union {
		/* I: physical cluster size in pages */
		unsigned short pclusterpages;

		/* I: tailpacking inline compressed size */
		unsigned short tailpacking_size;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or inplaced pages) */
	struct page *compressed_pages[];
};

#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
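
/*
 * Illustrative sketch (an assumption, not code from this header): callers
 * are expected to reach the embedded collection through the helper above
 * and hold its mutex before touching the "L:"-marked fields, e.g.
 *
 *	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
 *
 *	mutex_lock(&cl->lock);
 *	// cl->nr_pages, cl->vcnt and cl->pagevec[] may be used here
 *	mutex_unlock(&cl->lock);
 *
 * where "pcl" is any struct z_erofs_pcluster that has already been set up.
 */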

/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)
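
/*
 * How the chain markers above are meant to be read (an illustrative,
 * simplified sketch; the real traversal lives in the decompression code):
 *
 *	z_erofs_next_pcluster_t next = pcl->next;
 *
 *	if (next == Z_EROFS_PCLUSTER_NIL)
 *		;	// pcl isn't chained to anything
 *	else if (next == Z_EROFS_PCLUSTER_TAIL)
 *		;	// tail of a chain whose I/O hasn't been submitted yet
 *	else if (next == Z_EROFS_PCLUSTER_TAIL_CLOSED)
 *		;	// tail of a chain whose I/O has already been submitted
 *	else
 *		;	// (tagged) pointer to the next pcluster in the chain
 */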

struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};
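
/*
 * Illustrative completion sketch (an assumption, not code from this
 * header; "io", "sync" and "wq" are hypothetical names): once the last
 * in-flight bio of a queue finishes, i.e. pending_bios drops to zero,
 *
 *	if (sync)
 *		wake_up(&io->u.wait);		// a synchronous caller sleeps on u.wait
 *	else
 *		queue_work(wq, &io->u.work);	// otherwise decompress in a workqueue
 */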

static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
	return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	if (z_erofs_is_inline_pcluster(pcl))
		return 1;
	return pcl->pclusterpages;
}
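
/*
 * Illustrative usage of the helpers above (a sketch, not code from this
 * header): compressed_pages[] is a flexible array whose effective length
 * is what z_erofs_pclusterpages() reports, so a typical walk looks like
 *
 *	unsigned int i, pgnr = z_erofs_pclusterpages(pcl);
 *
 *	for (i = 0; i < pgnr; ++i) {
 *		struct page *page = pcl->compressed_pages[i];
 *		// a cached, in-place or inline page depending on the mode
 *	}
 *
 * For a tailpacking (inline) pcluster this degenerates to a single page.
 */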

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (a.k.a. ongoing_packs): the number of decrements still needed
 * before the page can be unlocked;
 * sub-index: 0 for a partial page, >= 1 for a full-page sub-index
 * (see the worked example below).
 */
typedef atomic_t z_erofs_onlinepage_t;
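
/*
 * Worked example (an illustration inferred from the macros above, not a
 * comment taken from this header): the atomic value packs
 *
 *	value = (sub-index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | waiters
 *
 * so a page at full-page sub-index 2 with two extents still pending holds
 * (2 << 2) | 2 == 0xa; each z_erofs_onlinepage_endio() below drops
 * "waiters" by one, and the page is unlocked once it reaches zero.
 */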

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
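
/*
 * Rough lifecycle sketch of the three helpers above (illustrative only;
 * the actual call sites live in the decompression frontend, not here):
 *
 *	z_erofs_onlinepage_init(page);			// waiters = 1, sub-index = 0
 *	z_erofs_onlinepage_fixup(page, index, true);	// +1 per extent using the page
 *	...
 *	z_erofs_onlinepage_endio(page);			// -1 once an extent is done
 *
 * Once every reference (including the initial one) has been dropped, the
 * final endio call marks the page uptodate (unless PageError) and unlocks it.
 */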

#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
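
/*
 * Worked example (an assumption for illustration): with 16 KiB kernel
 * stacks and 8-byte pointers, THREAD_SIZE / 8 / sizeof(struct page *) is
 * 16384 / 8 / 8 == 256, so the min_t() above clamps the on-stack array
 * to 96 page pointers (i.e. up to 384 KiB of data with 4 KiB pages).
 */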

#endif