blob: 05615a1099dbc5f6c2267357a0a6be51a9e5097f [file] [log] [blame]
David Sterbac1d7c512018-04-03 19:23:33 +02001// SPDX-License-Identifier: GPL-2.0
Chris Masonc8b97812008-10-29 14:49:59 -04002/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 *
Chris Masonc8b97812008-10-29 14:49:59 -04005 * Based on jffs2 zlib code:
6 * Copyright © 2001-2007 Red Hat, Inc.
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/zlib.h>
13#include <linux/zutil.h>
David Sterba6acafd12017-05-31 17:21:15 +020014#include <linux/mm.h>
Chris Masonc8b97812008-10-29 14:49:59 -040015#include <linux/init.h>
16#include <linux/err.h>
17#include <linux/sched.h>
18#include <linux/pagemap.h>
19#include <linux/bio.h>
Anand Jaine1ddce72017-05-26 15:44:59 +080020#include <linux/refcount.h>
Christoph Hellwigb2950862008-12-02 09:54:17 -050021#include "compression.h"
Chris Masonc8b97812008-10-29 14:49:59 -040022
Mikhail Zaslonko3fd396a2020-01-30 22:16:33 -080023/* workspace buffer size for s390 zlib hardware support */
24#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
25
/*
 * Per-workspace zlib state, handed out by the workspace manager so each
 * (de)compression runs with its own stream and staging buffer.
 */
struct workspace {
	z_stream strm;		/* single zlib stream reused for both deflate
				 * and inflate (allocated to the max of both
				 * workspace sizes in zlib_alloc_workspace) */
	char *buf;		/* staging buffer for input (compress) or
				 * output (decompress) data */
	unsigned int buf_size;	/* size of @buf: PAGE_SIZE, or
				 * ZLIB_DFLTCC_BUF_SIZE with s390 hw support */
	struct list_head list;	/* link on the workspace manager's free list */
	int level;		/* compression level for the next deflate */
};
33
/* Workspace manager instance for the zlib backend, wired up via btrfs_zlib_compress */
static struct workspace_manager wsm;
35
David Sterbad20f3952019-10-04 02:21:48 +020036struct list_head *zlib_get_workspace(unsigned int level)
Dennis Zhou92ee55302019-02-04 15:20:03 -050037{
David Sterba5907a9b2019-10-04 02:50:28 +020038 struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
Dennis Zhoud0ab62c2019-02-04 15:20:05 -050039 struct workspace *workspace = list_entry(ws, struct workspace, list);
40
41 workspace->level = level;
42
43 return ws;
Dennis Zhou92ee55302019-02-04 15:20:03 -050044}
45
David Sterbad20f3952019-10-04 02:21:48 +020046void zlib_free_workspace(struct list_head *ws)
Chris Masonc8b97812008-10-29 14:49:59 -040047{
Li Zefan261507a02010-12-17 14:21:50 +080048 struct workspace *workspace = list_entry(ws, struct workspace, list);
Chris Masonc8b97812008-10-29 14:49:59 -040049
David Sterba6acafd12017-05-31 17:21:15 +020050 kvfree(workspace->strm.workspace);
Chris Masonc8b97812008-10-29 14:49:59 -040051 kfree(workspace->buf);
52 kfree(workspace);
Chris Masonc8b97812008-10-29 14:49:59 -040053}
54
/*
 * Allocate a zlib workspace able to serve both compression and
 * decompression: one z_stream state area sized for the larger of the two,
 * plus a staging buffer.  Returns &workspace->list or ERR_PTR(-ENOMEM).
 */
struct list_head *zlib_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	/* One state area is shared by deflate and inflate, size for the max */
	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->level = level;
	workspace->buf = NULL;
	/*
	 * In case of s390 zlib hardware support, allocate a larger workspace
	 * buffer. If the allocation fails, fall back to a single page buffer
	 * (the opportunistic flags below avoid stressing the allocator).
	 */
	if (zlib_deflate_dfltcc_enabled()) {
		workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
					 __GFP_NOMEMALLOC | __GFP_NORETRY |
					 __GFP_NOWARN | GFP_NOIO);
		workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
	}
	if (!workspace->buf) {
		workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		workspace->buf_size = PAGE_SIZE;
	}
	/* Both allocations are checked here so one cleanup path suffices */
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}
93
/*
 * Compress pages of @mapping starting at @start into @pages.
 *
 * On entry *total_out holds the input length and *out_pages the capacity of
 * the @pages array; on return *out_pages is the number of pages actually
 * filled, *total_in/*total_out the consumed/produced byte counts.
 *
 * Returns 0 on success, -E2BIG when compression is not worthwhile or the
 * destination ran out of pages, -ENOMEM / -EIO on allocation or zlib errors.
 */
int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned int in_buf_pages;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	/* avail_in == 0 forces the first loop iteration to fetch input */
	workspace->strm.next_in = workspace->buf;
	workspace->strm.avail_in = 0;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;

	while (workspace->strm.total_in < len) {
		/*
		 * Get next input pages and copy the contents to
		 * the workspace buffer if required.
		 */
		if (workspace->strm.avail_in == 0) {
			bytes_left = len - workspace->strm.total_in;
			in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
					   workspace->buf_size / PAGE_SIZE);
			if (in_buf_pages > 1) {
				/*
				 * Multi-page buffer (s390 DFLTCC case): copy
				 * each input page into the workspace buffer
				 * so zlib sees one contiguous chunk.
				 */
				int i;

				for (i = 0; i < in_buf_pages; i++) {
					/* drop mapping/ref of previous page */
					if (in_page) {
						kunmap(in_page);
						put_page(in_page);
					}
					in_page = find_get_page(mapping,
							start >> PAGE_SHIFT);
					data_in = kmap(in_page);
					memcpy(workspace->buf + i * PAGE_SIZE,
					       data_in, PAGE_SIZE);
					start += PAGE_SIZE;
				}
				workspace->strm.next_in = workspace->buf;
			} else {
				/* single page: feed the mapped page directly */
				if (in_page) {
					kunmap(in_page);
					put_page(in_page);
				}
				in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
				data_in = kmap(in_page);
				start += PAGE_SIZE;
				workspace->strm.next_in = data_in;
			}
			workspace->strm.avail_in = min(bytes_left,
					(unsigned long) workspace->buf_size);
		}

		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n",
			       ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out. Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				/* destination full: compression won't fit */
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;
		if (workspace->strm.total_out > max_out)
			break;
	}
	workspace->strm.avail_in = 0;
	/*
	 * Call deflate with Z_FINISH flush parameter providing more output
	 * space but no more input data, until it returns with Z_STREAM_END.
	 */
	while (ret != Z_STREAM_END) {
		ret = zlib_deflate(&workspace->strm, Z_FINISH);
		if (ret == Z_STREAM_END)
			break;
		/* Z_BUF_ERROR just means "give me more output space" */
		if (ret != Z_OK && ret != Z_BUF_ERROR) {
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		} else if (workspace->strm.avail_out == 0) {
			/* get another page for the stream end */
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
	}
	zlib_deflateEnd(&workspace->strm);

	/* compression must actually shrink the data to be useful */
	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	/* report pages even on error so the caller can free them */
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}
276
/*
 * Decompress a whole compressed bio: inflate cb->compressed_pages into the
 * pages of cb->orig_bio, workspace->buf_size bytes at a time.
 *
 * Returns 0 on success (zero-filling any tail of the bio that zlib did not
 * produce), -EIO on stream errors.
 */
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = workspace->buf_size;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		/* negative wbits = raw deflate, header consumed by hand */
		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		/* copy what was just inflated out to the destination bio */
		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			/* the bio is fully satisfied, stop early */
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = workspace->buf_size;

		/* input page exhausted: rotate to the next compressed page */
		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;
			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
							   PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		/* zlib may stop short of the bio end; zero the remainder */
		zero_fill_bio(orig_bio);
	return ret;
}
367
/*
 * Decompress a single contiguous buffer (@data_in, @srclen bytes) into one
 * @dest_page, starting at logical offset @start_byte of the decompressed
 * stream and copying at most @destlen (capped to PAGE_SIZE) bytes.
 *
 * Used for inline extents.  Returns 0 on success, -EIO on stream errors or
 * if zlib produced no progress / ended before @destlen without draining
 * the request.  Any tail of the page past what was produced is zeroed.
 */
int zlib_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = workspace->buf_size;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		/* negative wbits = raw deflate, header consumed by hand */
		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* no forward progress from zlib: bail out */
		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		/* everything produced so far is before the requested range */
		if (total_out <= start_byte)
			goto next;

		/* skip the part of this chunk that precedes start_byte */
		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - (buf_offset % PAGE_SIZE));
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = workspace->buf_size;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected. btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}
465
/* zlib backend descriptor registered with the btrfs compression core */
const struct btrfs_compress_op btrfs_zlib_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 9,	/* zlib levels are 1..9 */
	.default_level		= BTRFS_ZLIB_DEFAULT_LEVEL,
};