// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/xz.h>
#include <linux/module.h>
#include "compress.h"

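/*
 * A preallocated MicroLZMA decompression context: contexts are chained on a
 * spinlock-protected freelist (z_erofs_lzma_head below), and each one embeds
 * a one-page bounce buffer for decompressing into overlapping pages.
 */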
struct z_erofs_lzma {
	struct z_erofs_lzma *next;
	struct xz_dec_microlzma *state;
	struct xz_buf buf;
	u8 bounce[PAGE_SIZE];
};

/* considering the LZMA performance, no need to use a lockless list for now */
static DEFINE_SPINLOCK(z_erofs_lzma_lock);
static unsigned int z_erofs_lzma_max_dictsize;
static unsigned int z_erofs_lzma_nstrms, z_erofs_lzma_avail_strms;
static struct z_erofs_lzma *z_erofs_lzma_head;
static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq);

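/* read-only module parameter; 0 (the default) means one stream per possible CPU */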
module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444);

void z_erofs_lzma_exit(void)
{
	/* there should be no running fs instance */
	while (z_erofs_lzma_avail_strms) {
		struct z_erofs_lzma *strm;

		spin_lock(&z_erofs_lzma_lock);
		strm = z_erofs_lzma_head;
		if (!strm) {
			spin_unlock(&z_erofs_lzma_lock);
			DBG_BUGON(1);
			return;
		}
		z_erofs_lzma_head = NULL;
		spin_unlock(&z_erofs_lzma_lock);

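		/* the whole chain is detached above; free it without the lock held */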
		while (strm) {
			struct z_erofs_lzma *n = strm->next;

			if (strm->state)
				xz_dec_microlzma_end(strm->state);
			kfree(strm);
			--z_erofs_lzma_avail_strms;
			strm = n;
		}
	}
}

int z_erofs_lzma_init(void)
{
	unsigned int i;

	/* by default, use # of possible CPUs instead */
	if (!z_erofs_lzma_nstrms)
		z_erofs_lzma_nstrms = num_possible_cpus();

	for (i = 0; i < z_erofs_lzma_nstrms; ++i) {
		struct z_erofs_lzma *strm = kzalloc(sizeof(*strm), GFP_KERNEL);

		if (!strm) {
			z_erofs_lzma_exit();
			return -ENOMEM;
		}
		spin_lock(&z_erofs_lzma_lock);
		strm->next = z_erofs_lzma_head;
		z_erofs_lzma_head = strm;
		spin_unlock(&z_erofs_lzma_lock);
		++z_erofs_lzma_avail_strms;
	}
	return 0;
}

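/*
 * Validate the on-disk LZMA parameters when an fs instance enables LZMA and,
 * if the requested dictionary exceeds the current global maximum, rebuild
 * every preallocated decoder state with the larger dictionary size.
 */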
int z_erofs_load_lzma_config(struct super_block *sb,
			     struct erofs_super_block *dsb,
			     struct z_erofs_lzma_cfgs *lzma, int size)
{
	static DEFINE_MUTEX(lzma_resize_mutex);
	unsigned int dict_size, i;
	struct z_erofs_lzma *strm, *head = NULL;
	int err;

	if (!lzma || size < sizeof(struct z_erofs_lzma_cfgs)) {
		erofs_err(sb, "invalid lzma cfgs, size=%u", size);
		return -EINVAL;
	}
	if (lzma->format) {
		erofs_err(sb, "unidentified lzma format %x, please check kernel version",
			  le16_to_cpu(lzma->format));
		return -EINVAL;
	}
	dict_size = le32_to_cpu(lzma->dict_size);
	if (dict_size > Z_EROFS_LZMA_MAX_DICT_SIZE || dict_size < 4096) {
		erofs_err(sb, "unsupported lzma dictionary size %u",
			  dict_size);
		return -EINVAL;
	}

	erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!");

	/* serialize concurrent z_erofs_load_lzma_config() calls to avoid deadlock */
	mutex_lock(&lzma_resize_mutex);

	if (z_erofs_lzma_max_dictsize >= dict_size) {
		mutex_unlock(&lzma_resize_mutex);
		return 0;
	}

	/* 1. collect/isolate all streams for the following check */
	for (i = 0; i < z_erofs_lzma_avail_strms; ++i) {
		struct z_erofs_lzma *last;

again:
		spin_lock(&z_erofs_lzma_lock);
		strm = z_erofs_lzma_head;
		if (!strm) {
			spin_unlock(&z_erofs_lzma_lock);
			wait_event(z_erofs_lzma_wq,
				   READ_ONCE(z_erofs_lzma_head));
			goto again;
		}
		z_erofs_lzma_head = NULL;
		spin_unlock(&z_erofs_lzma_lock);

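		/*
		 * several contexts may be grabbed at once; count each extra
		 * one toward i, then splice the chain onto the local list.
		 */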
		for (last = strm; last->next; last = last->next)
			++i;
		last->next = head;
		head = strm;
	}

	err = 0;
	/* 2. walk each isolated stream and grow max dict_size if needed */
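	/* on -ENOMEM, keep walking so that step 3 still restores every stream */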
	for (strm = head; strm; strm = strm->next) {
		if (strm->state)
			xz_dec_microlzma_end(strm->state);
		strm->state = xz_dec_microlzma_alloc(XZ_PREALLOC, dict_size);
		if (!strm->state)
			err = -ENOMEM;
	}

	/* 3. push back all to the global list and update max dict_size */
	spin_lock(&z_erofs_lzma_lock);
	DBG_BUGON(z_erofs_lzma_head);
	z_erofs_lzma_head = head;
	spin_unlock(&z_erofs_lzma_lock);
	/* wake up everyone blocked on the emptied freelist */
	wake_up_all(&z_erofs_lzma_wq);

	z_erofs_lzma_max_dictsize = dict_size;
	mutex_unlock(&lzma_resize_mutex);
	return err;
}

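/*
 * Decompress one MicroLZMA pcluster: the decoder is fed one input/output
 * page at a time, and in/out page overlaps are resolved with the per-context
 * bounce buffer or short-lived page copies below.
 */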
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
			    struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
	unsigned int inlen, outlen, pageofs;
	struct z_erofs_lzma *strm;
	u8 *kin;
	bool bounced = false;
	int no, ni, j, err = 0;

	/* 1. get the exact LZMA compressed size */
	kin = kmap(*rq->in);
	err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
				   min_t(unsigned int, rq->inputsize,
					 EROFS_BLKSIZ - rq->pageofs_in));
	if (err) {
		kunmap(*rq->in);
		return err;
	}

	/* 2. get an available lzma context */
again:
	spin_lock(&z_erofs_lzma_lock);
	strm = z_erofs_lzma_head;
	if (!strm) {
		spin_unlock(&z_erofs_lzma_lock);
		wait_event(z_erofs_lzma_wq, READ_ONCE(z_erofs_lzma_head));
		goto again;
	}
	z_erofs_lzma_head = strm->next;
	spin_unlock(&z_erofs_lzma_lock);

	/* 3. multi-call decompress */
	inlen = rq->inputsize;
	outlen = rq->outputsize;
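	/* the uncompressed size is exact unless partial decoding is in use */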
	xz_dec_microlzma_reset(strm->state, inlen, outlen,
			       !rq->partial_decoding);
	pageofs = rq->pageofs_out;
	strm->buf.in = kin + rq->pageofs_in;
	strm->buf.in_pos = 0;
	strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in);
	inlen -= strm->buf.in_size;
	strm->buf.out = NULL;
	strm->buf.out_pos = 0;
	strm->buf.out_size = 0;

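	/*
	 * Run the decoder page by page: whenever the current output or input
	 * page is exhausted, unmap it and switch to the next one on demand.
	 */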
	for (ni = 0, no = -1;;) {
		enum xz_ret xz_err;

		if (strm->buf.out_pos == strm->buf.out_size) {
			if (strm->buf.out) {
				kunmap(rq->out[no]);
				strm->buf.out = NULL;
			}

			if (++no >= nrpages_out || !outlen) {
				erofs_err(rq->sb, "decompressed buf out of bound");
				err = -EFSCORRUPTED;
				break;
			}
			strm->buf.out_pos = 0;
			strm->buf.out_size = min_t(u32, outlen,
						   PAGE_SIZE - pageofs);
			outlen -= strm->buf.out_size;
			if (rq->out[no])
				strm->buf.out = kmap(rq->out[no]) + pageofs;
			pageofs = 0;
		} else if (strm->buf.in_pos == strm->buf.in_size) {
			kunmap(rq->in[ni]);

			if (++ni >= nrpages_in || !inlen) {
				erofs_err(rq->sb, "compressed buf out of bound");
				err = -EFSCORRUPTED;
				break;
			}
			strm->buf.in_pos = 0;
			strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE);
			inlen -= strm->buf.in_size;
			kin = kmap(rq->in[ni]);
			strm->buf.in = kin;
			bounced = false;
		}

		/*
		 * Handle overlapping: use the bounce buffer if the current
		 * input page is also the page being decompressed into;
		 * otherwise, replace later overlapping input pages with
		 * short-lived copies from the on-stack pagepool, since pages
		 * may be shared within the same request.
		 */
		if (!bounced && rq->out[no] == rq->in[ni]) {
			memcpy(strm->bounce, strm->buf.in, strm->buf.in_size);
			strm->buf.in = strm->bounce;
			bounced = true;
		}
		for (j = ni + 1; j < nrpages_in; ++j) {
			struct page *tmppage;

			if (rq->out[no] != rq->in[j])
				continue;

			DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
							rq->in[j]));
			tmppage = erofs_allocpage(pagepool,
						  GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
			copy_highpage(tmppage, rq->in[j]);
			rq->in[j] = tmppage;
		}
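		/* a single decoding step; XZ_OK simply means more data is expected */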
		xz_err = xz_dec_microlzma_run(strm->state, &strm->buf);
		DBG_BUGON(strm->buf.out_pos > strm->buf.out_size);
		DBG_BUGON(strm->buf.in_pos > strm->buf.in_size);

		if (xz_err != XZ_OK) {
			if (xz_err == XZ_STREAM_END && !outlen)
				break;
			erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]",
				  xz_err, rq->inputsize, rq->outputsize);
			err = -EFSCORRUPTED;
			break;
		}
	}
	if (no < nrpages_out && strm->buf.out)
		kunmap(rq->out[no]);
	if (ni < nrpages_in)
		kunmap(rq->in[ni]);
	/* 4. push back LZMA stream context to the global list */
	spin_lock(&z_erofs_lzma_lock);
	strm->next = z_erofs_lzma_head;
	z_erofs_lzma_head = strm;
	spin_unlock(&z_erofs_lzma_lock);
	wake_up(&z_erofs_lzma_wq);
	return err;
}