// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
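/*
 * Note: the margin formula above presumably mirrors the identically-named
 * macro in the userspace LZ4 library.  For in-place decompression, the
 * compressed data must end at least this many bytes past the end of the
 * decompressed output so the decompressor never overwrites input bytes it
 * has not yet consumed.
 */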

struct z_erofs_decompressor {
	/*
	 * If destpages contain sparse (missing) pages, fill them with
	 * bounce pages.  It also checks whether destpages indicate
	 * contiguous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);
	} else {
		distance = le16_to_cpu(dsb->lz4_max_distance);
	}

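	/*
	 * The on-disk max distance is in bytes; convert it to a page count.
	 * One extra page is reserved since a misaligned window can span one
	 * more page than DIV_ROUND_UP() alone would suggest.
	 */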
	EROFS_SB(sb)->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return 0;
}

static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

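	/*
	 * Walk the output pages with j as a rolling index into a window of
	 * the last lz4_max_distance_pages slots.  A bit set in bounced[]
	 * means that slot was filled with a bounce page; once a bounce page
	 * slides out of the LZ4 match window it can no longer be referenced
	 * by further matches, so it is stashed on the availables[] stack
	 * for reuse.
	 */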
	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* a 'bounced' bit is only valid after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

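		/*
		 * kaddr tracks whether all mapped output pages seen so far
		 * form one physically contiguous run; it is cleared as soon
		 * as a hole or a discontiguous page is met.
		 */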
		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
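	/* 1: the whole output is physically contiguous, 0: it is not */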
	return kaddr ? 1 : 0;
}

static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
				       u8 *src, unsigned int pageofs_in)
{
	/*
	 * While in-place decompression is ongoing, the compressed data has
	 * to be copied to a temporary buffer first so that it cannot be
	 * overwritten by the decompressed output.
	 */
	struct page **in = rq->in;
	u8 *const tmp = erofs_get_pcpubuf(0);
	u8 *tmpp = tmp;
	unsigned int inlen = rq->inputsize - pageofs_in;
	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

	while (tmpp < tmp + inlen) {
		if (!src)
			src = kmap_atomic(*in);
		memcpy(tmpp, src + pageofs_in, count);
		kunmap_atomic(src);
		src = NULL;
		tmpp += count;
		pageofs_in = 0;
		count = PAGE_SIZE;
		++in;
	}
	return tmp;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* in-place decompression is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

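		/*
		 * With 0padding, compressed data is zero-padded from the
		 * start of the page; skip the leading zero bytes (stopping
		 * at a page boundary) to locate the real input.
		 */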
		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

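		/*
		 * In-place decompression only stays safe when the input page
		 * is also the last output page and enough margin is left
		 * past the output end; otherwise (or for partial decoding
		 * and non-0padding images), fall back to copying the
		 * compressed data into a per-CPU buffer first.
		 */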
		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		    LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
						  inlen, rq->outputsize,
						  rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  inlen, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, inlen, inputmargin, rq->outputsize);

		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

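	/*
	 * cur is biased back by pageofs_out so that it advances in whole
	 * pages: the first page only receives the right-hand part starting
	 * at pageofs_out, every following page gets a full (or final,
	 * truncated) PAGE_SIZE chunk.
	 */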
	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret, i;

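	/*
	 * dst_maptype records how dst was mapped: 0 = kmap_atomic'ed single
	 * page, 1 = direct page_address() of physically contiguous pages,
	 * 2 = vm_map_ram() virtual mapping.
	 */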
	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For small output sizes (especially much smaller than PAGE_SIZE),
	 * it is preferable to decompress into a per-CPU buffer and memcpy
	 * the decompressed data out, rather than copying the compressed
	 * data around.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}

	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	i = 0;
	while (1) {
		dst = vm_map_ram(rq->out, nrpages_out, -1);

		/* retry two more times (3 times in total) */
		if (dst || ++i >= 3)
			break;
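		/*
		 * vm_map_ram() can fail under vmap address-space pressure;
		 * purging lazily-freed vmap aliases may release space
		 * before the next attempt.
		 */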
		vm_unmap_aliases();
	}

	if (!dst)
		return -ENOMEM;

	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

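/*
 * "shifted" pclusters hold the data uncompressed but possibly at a
 * different page offset, so transforming them is just a matter of moving
 * bytes into place; at most two output pages are involved.
 */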
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}