// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
#include "compression.h"

struct workspace {
	z_stream strm;
	char *buf;
	struct list_head list;
	int level;
};

static void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}

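/*
 * Allocate one zlib workspace: z_stream scratch memory sized for both
 * deflate and inflate, plus a one page bounce buffer.
 */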
static struct list_head *zlib_alloc_workspace(void)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

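/*
 * Compress pages of @mapping starting at @start.  On entry *total_out is
 * the number of input bytes and *out_pages the size of the @pages array;
 * on success they are updated to the bytes produced, bytes consumed and
 * pages used.  Returns -E2BIG when the data does not shrink or the
 * destination pages run out.
 */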
static int zlib_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start,
			       struct page **pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	data_in = kmap(in_page);

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	workspace->strm.next_in = data_in;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.avail_in = min(len, PAGE_SIZE);

	while (workspace->strm.total_in < len) {
		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n",
			       ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out. Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;

		/* we've read in a full page, get a new one */
		if (workspace->strm.avail_in == 0) {
			if (workspace->strm.total_out > max_out)
				break;

			bytes_left = len - workspace->strm.total_in;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
			data_in = kmap(in_page);
			workspace->strm.avail_in = min(bytes_left,
						       PAGE_SIZE);
			workspace->strm.next_in = data_in;
		}
	}
	workspace->strm.avail_in = 0;
	ret = zlib_deflate(&workspace->strm, Z_FINISH);
	zlib_deflateEnd(&workspace->strm);

	if (ret != Z_STREAM_END) {
		ret = -EIO;
		goto out;
	}

	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}

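/*
 * Decompress the whole compressed extent described by @cb and copy the
 * result into the pages of cb->orig_bio via btrfs_decompress_buf2page().
 */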
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;
			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
						       PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		zero_fill_bio(orig_bio);
	return ret;
}

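/*
 * Decompress @srclen bytes from @data_in into a single page, @dest_page,
 * starting at offset @start_byte of the decompressed stream.  If zlib
 * produces less than @destlen bytes, the shortfall up to @destlen is
 * zero filled.
 */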
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
			   struct page *dest_page,
			   unsigned long start_byte,
			   size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected. btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}

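/*
 * The requested compression level is carried in the high nibble of @type;
 * it is clamped to 9, and 0 selects the default level used here, 3.
 */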
static void zlib_set_level(struct list_head *ws, unsigned int type)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	unsigned level = (type & 0xF0) >> 4;

	if (level > 9)
		level = 9;

	workspace->level = level > 0 ? level : 3;
}

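/* Hooks called by the generic btrfs compression code. */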
const struct btrfs_compress_op btrfs_zlib_compress = {
	.alloc_workspace	= zlib_alloc_workspace,
	.free_workspace		= zlib_free_workspace,
	.compress_pages		= zlib_compress_pages,
	.decompress_bio		= zlib_decompress_bio,
	.decompress		= zlib_decompress,
	.set_level		= zlib_set_level,
};