// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by data
 *     payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     Segment header never crosses sector boundary, thus it's possible to
 *     have at most 3 padding zeros at the end of the sector.
 *
 * 2.2 Data Payload
 *     Variable size. Its upper size limit is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4KiB sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */
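
/*
 * A worked example for the payload size limit above, based on the
 * lzo1x_worst_compress() definition in <linux/lzo.h>:
 *
 *   lzo1x_worst_compress(x)    = x + x/16 + 64 + 3
 *   lzo1x_worst_compress(4096) = 4096 + 256 + 64 + 3 = 4419
 */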

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}

/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header
 *   If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header crossing sector boundary, previous
	 * run should ensure we have enough space left inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap(cur_page);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

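	/*
	 * A worked example of the padding rule, assuming a 4KiB sector: if
	 * the payload ends at offset 0x0ffd, only 3 bytes are left in the
	 * sector, too little for a 4 byte segment header, so those bytes
	 * are zeroed below and the next segment header starts at 0x1000.
	 */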
	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap(cur_page);
	return 0;
}

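/*
 * Note on the in/out parameters below: on entry, *out_pages holds the
 * capacity of the @pages array and *total_out the length of the input
 * range to compress; on return, they hold the number of pages used and
 * the total compressed size (header included).
 */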
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap(page_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if compression is making the data bigger after two
		 * sectors.  If so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * For the payload there will be no padding, just need to do page switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		char *kaddr;
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
				     orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		kaddr = kmap(cur_page);
		memcpy(dest + *cur_in - orig_in,
		       kaddr + offset_in_page(*cur_in),
		       copy_len);
		kunmap(cur_page);

		*cur_in += copy_len;
	}
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap(cb->compressed_pages[0]);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If either check fails, the compressed extent is considered
	 * corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}
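	/*
	 * For example, on a 4KiB sector size filesystem, an extent with
	 * compressed_len == 8192 must report a len_in in the range
	 * (4096, 8192], otherwise one of the two checks above fires.
	 */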

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap(cur_page);
		cur_in += LZO_LEN;

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

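	/*
	 * The input is expected to hold exactly one segment, the layout
	 * used for inlined extents (see the format description at the top
	 * of this file): a 4 byte header recording the total length, one
	 * 4 byte segment header, then the payload.  The checks below
	 * reject anything that cannot match that layout.
	 */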
	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's
	 * move this check closer to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too,
	 * but this will cover anything missing from the decompressed
	 * data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

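/*
 * LZO has no tunable compression levels, so both the maximum and the
 * default level are reported as 1.
 */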
const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};