Gao Xiang | 29b24f6 | 2019-07-31 23:57:31 +0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 2 | /* |
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 3 | * Copyright (C) 2019 HUAWEI, Inc. |
Alexander A. Klimov | 592e7cd | 2020-07-13 15:09:44 +0200 | [diff] [blame] | 4 | * https://www.huawei.com/ |
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 5 | * Created by Gao Xiang <gaoxiang25@huawei.com> |
| 6 | */ |
| 7 | #include "compress.h" |
Gao Xiang | 46c2d14 | 2019-07-31 23:57:44 +0800 | [diff] [blame] | 8 | #include <linux/module.h> |
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 9 | #include <linux/lz4.h> |
| 10 | |
#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

/*
 * Worst-case number of pages an LZ4 match window can span: the window
 * itself rounded up to pages, plus one extra page because the window
 * may start at an arbitrary (non page-aligned) offset.
 */
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
/* fallback definition when <linux/lz4.h> does not provide the margin */
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 19 | |
/* per-algorithm decompression operations, indexed by rq->alg */
struct z_erofs_decompressor {
	/*
	 * if destpages have sparsed pages, fill them with bounce pages.
	 * it also check whether destpages indicate continuous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	/* decompress the whole request into the mapped output buffer @out */
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	/* human-readable algorithm name (used for diagnostics) */
	char *name;
};
| 30 | |
Huang Jianan | 5d50538 | 2021-03-29 09:23:06 +0800 | [diff] [blame] | 31 | int z_erofs_load_lz4_config(struct super_block *sb, |
Gao Xiang | 46249cd | 2021-03-29 09:23:07 +0800 | [diff] [blame^] | 32 | struct erofs_super_block *dsb, |
| 33 | struct z_erofs_lz4_cfgs *lz4, int size) |
Huang Jianan | 5d50538 | 2021-03-29 09:23:06 +0800 | [diff] [blame] | 34 | { |
Gao Xiang | 46249cd | 2021-03-29 09:23:07 +0800 | [diff] [blame^] | 35 | u16 distance; |
| 36 | |
| 37 | if (lz4) { |
| 38 | if (size < sizeof(struct z_erofs_lz4_cfgs)) { |
| 39 | erofs_err(sb, "invalid lz4 cfgs, size=%u", size); |
| 40 | return -EINVAL; |
| 41 | } |
| 42 | distance = le16_to_cpu(lz4->max_distance); |
| 43 | } else { |
| 44 | distance = le16_to_cpu(dsb->lz4_max_distance); |
| 45 | } |
Huang Jianan | 5d50538 | 2021-03-29 09:23:06 +0800 | [diff] [blame] | 46 | |
| 47 | EROFS_SB(sb)->lz4.max_distance_pages = distance ? |
| 48 | DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : |
| 49 | LZ4_MAX_DISTANCE_PAGES; |
| 50 | return 0; |
| 51 | } |
| 52 | |
/*
 * Fill the sparse (NULL) slots of rq->out with real pages so the LZ4
 * decompressor can write anywhere inside the output window.
 *
 * A page is only reusable as a bounce page once it has dropped out of the
 * LZ4 match window (lz4_max_distance_pages); the 'bounced' bitmap tracks,
 * per window slot, whether the page placed there was a bounce page, and
 * 'availables' stacks bounce pages that have slid out of the window.
 *
 * Returns 1 if all output pages turned out to be physically contiguous
 * (so decompression can target page_address() directly), 0 otherwise.
 */
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;	/* non-NULL while pages stay contiguous */
	unsigned int i, j, top;

	top = 0;
	/* i indexes output pages; j is i modulo the window size */
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/* page one full window behind is now safe to reuse */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				/* still contiguous with the previous page? */
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		/* sparse slot: a bounce page breaks physical contiguity */
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			/* reuse a bounce page that left the match window */
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
| 108 | |
| 109 | static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, |
| 110 | u8 *src, unsigned int pageofs_in) |
| 111 | { |
| 112 | /* |
| 113 | * if in-place decompression is ongoing, those decompressed |
| 114 | * pages should be copied in order to avoid being overlapped. |
| 115 | */ |
| 116 | struct page **in = rq->in; |
| 117 | u8 *const tmp = erofs_get_pcpubuf(0); |
| 118 | u8 *tmpp = tmp; |
| 119 | unsigned int inlen = rq->inputsize - pageofs_in; |
| 120 | unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); |
| 121 | |
| 122 | while (tmpp < tmp + inlen) { |
| 123 | if (!src) |
| 124 | src = kmap_atomic(*in); |
| 125 | memcpy(tmpp, src + pageofs_in, count); |
| 126 | kunmap_atomic(src); |
| 127 | src = NULL; |
| 128 | tmpp += count; |
| 129 | pageofs_in = 0; |
| 130 | count = PAGE_SIZE; |
| 131 | ++in; |
| 132 | } |
| 133 | return tmp; |
| 134 | } |
| 135 | |
/*
 * Decompress one LZ4 pcluster (at most one page of compressed input)
 * into the already-mapped output buffer @out.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP for oversized
 * input, -EIO on corrupt data or short decompression).
 */
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	/* only single-page compressed input is handled by this helper */
	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		/*
		 * with 0padding, compressed data is end-aligned in the
		 * page: skip the leading zero bytes to find its start.
		 */
		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		/* all zeroes: no compressed payload at all */
		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

		/*
		 * in-place decompression is only safe for full decoding
		 * with 0padding and enough trailing margin; otherwise
		 * copy the input out to a per-CPU bounce buffer first.
		 */
		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		    LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
						  inlen, rq->outputsize,
						  rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  inlen, rq->outputsize);

	/* both helpers return the number of decompressed bytes on success */
	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, inlen, inputmargin, rq->outputsize);

		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		/* zero the undecoded tail so stale data never leaks out */
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	/* release whichever source buffer is actually in use */
	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}
| 212 | |
| 213 | static struct z_erofs_decompressor decompressors[] = { |
| 214 | [Z_EROFS_COMPRESSION_SHIFTED] = { |
| 215 | .name = "shifted" |
| 216 | }, |
| 217 | [Z_EROFS_COMPRESSION_LZ4] = { |
Gao Xiang | 99634bf | 2019-09-04 10:09:05 +0800 | [diff] [blame] | 218 | .prepare_destpages = z_erofs_lz4_prepare_destpages, |
| 219 | .decompress = z_erofs_lz4_decompress, |
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 220 | .name = "lz4" |
| 221 | }, |
| 222 | }; |
| 223 | |
/*
 * Scatter decompressed data from the linear per-CPU buffer @dst back
 * into the output pages @out.
 *
 * @dst points at the data start, which sits @pageofs_out bytes into the
 * first output page; 'cur' is biased back by that offset so each loop
 * iteration corresponds to exactly one page-aligned window of output.
 * NULL entries in @out (holes the caller does not need) are skipped.
 */
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;	/* page-aligned cursor */

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				/* middle/tail page: copy from offset 0 */
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				/* first page: data starts at pageofs_out */
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}
| 250 | |
/*
 * Run a decompression request through the per-algorithm ops, choosing
 * how to obtain a linear output mapping:
 *   dst_maptype 0 - single page, kmap_atomic
 *   dst_maptype 1 - physically contiguous pages, page_address
 *   dst_maptype 2 - vm_map_ram over the output pages
 * Small outputs instead decompress into a per-CPU buffer and are copied
 * out afterwards.  Returns 0 or a negative errno.
 */
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret, i;

	/* one output page and no in-place I/O: map it directly */
	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For the case of small output size (especially much less
	 * than PAGE_SIZE), memcpy the decompressed data rather than
	 * compressed data is preferred.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}

	/* fill holes with bounce pages; >0 means physically contiguous */
	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	i = 0;
	while (1) {
		dst = vm_map_ram(rq->out, nrpages_out, -1);

		/* retry two more times (totally 3 times) */
		if (dst || ++i >= 3)
			break;
		/* flushing aliases may free up vmap space for the retry */
		vm_unmap_aliases();
	}

	if (!dst)
		return -ENOMEM;

	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	/* undo whichever mapping was set up above */
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}
| 321 | |
/*
 * Handle the "shifted" (stored, uncompressed) format: the pcluster's
 * single input page holds the data verbatim; copy it to up to two
 * output pages at offset rq->pageofs_out.  Returns 0 or -EIO.
 */
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	/* shifted data can never span more than two output pages */
	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	/* output page is the input page itself: nothing to move */
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/* second page aliases the input: shift in place */
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
| 360 | |
| 361 | int z_erofs_decompress(struct z_erofs_decompress_req *rq, |
| 362 | struct list_head *pagepool) |
| 363 | { |
| 364 | if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) |
Gao Xiang | 99634bf | 2019-09-04 10:09:05 +0800 | [diff] [blame] | 365 | return z_erofs_shifted_transform(rq, pagepool); |
| 366 | return z_erofs_decompress_generic(rq, pagepool); |
Gao Xiang | 7fc45db | 2019-06-24 15:22:55 +0800 | [diff] [blame] | 367 | } |
| 368 | |