// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     An inlined LZO compressed extent is limited to one segment.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, so a sector can end
 *     with at most 3 padding zeros.
 *
 * 2.2 Data Payload
 *     Variable size. Its upper size limit is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 *
 * An illustrative walker over this format is sketched below, after
 * read_compress_length().
 */

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

/* Store @len as a LE32 header; memcpy is used since @buf may be unaligned. */
static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

/* Read back a LE32 length header written by write_compress_length(). */
static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}
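
/*
 * Illustrative sketch only (a hypothetical helper, not used by btrfs
 * itself): walk the on-disk format described at the top of this file,
 * visiting each segment header in a flat buffer holding one whole
 * compressed extent. Assumes @buf already passed the header sanity checks.
 */
static inline void lzo_walk_segments_sketch(const char *buf, u32 sectorsize)
{
	/* Byte 0: total compressed size, including this header */
	u32 total_len = read_compress_length(buf);
	u32 cur = LZO_LEN;

	while (cur < total_len) {
		/* Each segment starts with the length of its payload */
		u32 seg_len = read_compress_length(buf + cur);

		cur += LZO_LEN + seg_len;
		/*
		 * A segment header never crosses a sector boundary; if fewer
		 * than LZO_LEN bytes remain in this sector, they are padding
		 * zeros and must be skipped.
		 */
		if (sectorsize - (cur % sectorsize) < LZO_LEN)
			cur = round_up(cur, sectorsize);
	}
}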

/*
 * Copy one compressed segment into the destination pages. This will:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment
 *   header; if not, pad with at most LZO_LEN - 1 (3) bytes of zeros.
 *
 * New pages are allocated when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header to cross a sector boundary; the
	 * previous iteration should have left enough space inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap(cur_page);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining
	 * space of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining space is not enough for a header, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0, sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap(cur_page);
	return 0;
}
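
/*
 * Worked sketch of the padding rule above (a hypothetical helper, not part
 * of btrfs): how many zero bytes must follow @cur_out so the next segment
 * header cannot cross a sector boundary. E.g. with a 4KiB sector and
 * @cur_out == 0x0ffe, only 2 bytes remain in the sector, too few for a
 * LZO_LEN (4) byte header, so 2 padding zeros are needed.
 */
static inline u32 lzo_pad_bytes_sketch(u32 cur_out, u32 sectorsize)
{
	u32 sector_bytes_left = round_up(cur_out, sectorsize) - cur_out;

	/* Room for a full header (or exactly on a boundary): no padding */
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		return 0;
	return sector_bytes_left;	/* at most LZO_LEN - 1 bytes */
}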

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now; we will come back later and write the
	 * total compressed size.
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap(page_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * If the output is still larger than the input after the
		 * first two sectors, the data is not compressing well;
		 * give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached the page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload carries no padding, so only page switching is needed.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		char *kaddr;
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		kaddr = kmap(cur_page);
		memcpy(dest + *cur_in - orig_in,
		       kaddr + offset_in_page(*cur_in),
		       copy_len);
		kunmap(cur_page);

		*cur_in += copy_len;
	}
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap(cb->compressed_pages[0]);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If either check fails, the compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap(cur_page);
		cur_in += LZO_LEN;

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller already checks against PAGE_SIZE, but let's keep this
	 * check close to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock already zeroes the tail of the page, but this also
	 * covers anything missing from the decompressed data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};