// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                u64 start, struct page **pages, unsigned long *out_pages,
                unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                struct page *dest_page, unsigned long start_byte, size_t srclen,
                size_t destlen);
struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
struct list_head *zlib_get_workspace(unsigned int level);
void zlib_put_workspace(struct list_head *ws);

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                u64 start, struct page **pages, unsigned long *out_pages,
                unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                struct page *dest_page, unsigned long start_byte, size_t srclen,
                size_t destlen);
struct list_head *lzo_alloc_workspace(unsigned int level);
void lzo_free_workspace(struct list_head *ws);
void lzo_put_workspace(struct list_head *ws);

int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
                u64 start, struct page **pages, unsigned long *out_pages,
                unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, unsigned char *data_in,
                struct page *dest_page, unsigned long start_byte, size_t srclen,
                size_t destlen);
void zstd_init_workspace_manager(void);
void zstd_cleanup_workspace_manager(void);
struct list_head *zstd_alloc_workspace(unsigned int level);
void zstd_free_workspace(struct list_head *ws);
struct list_head *zstd_get_workspace(unsigned int level);
void zstd_put_workspace(struct list_head *ws);

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
        case BTRFS_COMPRESS_LZO:
        case BTRFS_COMPRESS_ZSTD:
        case BTRFS_COMPRESS_NONE:
                return btrfs_compress_types[type];
        default:
                break;
        }

        return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
                size_t comp_len = strlen(btrfs_compress_types[i]);

                if (len < comp_len)
                        continue;

                if (!strncmp(btrfs_compress_types[i], str, comp_len))
                        return true;
        }
        return false;
}
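
/*
 * Illustrative sketch, not part of the original file: the loop above only
 * compares the first strlen(name) bytes, so a value that carries extra
 * characters after a known name still validates (hypothetical calls):
 *
 *	btrfs_compress_is_valid_type("zlib:9", 6);	// true, prefix "zlib"
 *	btrfs_compress_is_valid_type("zl", 2);		// false, shorter than any name
 */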

static int compression_compress_pages(int type, struct list_head *ws,
                struct address_space *mapping, u64 start, struct page **pages,
                unsigned long *out_pages, unsigned long *total_in,
                unsigned long *total_out)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
                return zlib_compress_pages(ws, mapping, start, pages,
                                out_pages, total_in, total_out);
        case BTRFS_COMPRESS_LZO:
                return lzo_compress_pages(ws, mapping, start, pages,
                                out_pages, total_in, total_out);
        case BTRFS_COMPRESS_ZSTD:
                return zstd_compress_pages(ws, mapping, start, pages,
                                out_pages, total_in, total_out);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here. As a sane fallback, return what the
                 * callers will understand as 'no compression happened'.
                 */
                return -E2BIG;
        }
}

static int compression_decompress_bio(int type, struct list_head *ws,
                struct compressed_bio *cb)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static int compression_decompress(int type, struct list_head *ws,
                unsigned char *data_in, struct page *dest_page,
                unsigned long start_byte, size_t srclen, size_t destlen)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
                                      unsigned long disk_size)
{
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        return sizeof(struct compressed_bio) +
                (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
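
/*
 * Worked example, not part of the original file: assuming a 4K sectorsize and
 * the 4-byte crc32c checksum, a 128K compressed extent needs room for
 * DIV_ROUND_UP(128K, 4K) = 32 checksums, so the allocation above is
 * sizeof(struct compressed_bio) + 32 * 4 bytes.
 */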

static int check_compressed_csum(struct btrfs_inode *inode,
                                 struct compressed_bio *cb,
                                 u64 disk_start)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int ret;
        struct page *page;
        unsigned long i;
        char *kaddr;
        u8 csum[BTRFS_CSUM_SIZE];
        u8 *cb_sum = cb->sums;

        if (inode->flags & BTRFS_INODE_NODATASUM)
                return 0;

        shash->tfm = fs_info->csum_shash;

        for (i = 0; i < cb->nr_pages; i++) {
                page = cb->compressed_pages[i];

                crypto_shash_init(shash);
                kaddr = kmap_atomic(page);
                crypto_shash_update(shash, kaddr, PAGE_SIZE);
                kunmap_atomic(kaddr);
                crypto_shash_final(shash, (u8 *)&csum);

                if (memcmp(&csum, cb_sum, csum_size)) {
                        btrfs_print_data_csum_error(inode, disk_start,
                                        csum, cb_sum, cb->mirror_num);
                        ret = -EIO;
                        goto fail;
                }
                cb_sum += csum_size;

        }
        ret = 0;
fail:
        return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned long index;
        unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
        int ret = 0;

        if (bio->bi_status)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;

        /*
         * Record the correct mirror_num in cb->orig_bio so that
         * read-repair can work properly.
         */
        ASSERT(btrfs_io_bio(cb->orig_bio));
        btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
        cb->mirror_num = mirror;

        /*
         * Some IO in this cb have failed, just skip checksum as there
         * is no way it could be correct.
         */
        if (cb->errors == 1)
                goto csum_failed;

        inode = cb->inode;
        ret = check_compressed_csum(BTRFS_I(inode), cb,
                                    (u64)bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;

        /* ok, we're the last bio for this extent, lets start
         * the decompression.
         */
        ret = btrfs_decompress_bio(cb);

csum_failed:
        if (ret)
                cb->errors = 1;

        /* release the compressed pages */
        index = 0;
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* do io completion on the original bio */
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
                struct bio_vec *bvec;
                struct bvec_iter_all iter_all;

                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
                ASSERT(!bio_flagged(bio, BIO_CLONED));
                bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
                        SetPageChecked(bvec->bv_page);

                bio_endio(cb->orig_bio);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
                                              const struct compressed_bio *cb)
{
        unsigned long index = cb->start >> PAGE_SHIFT;
        unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int ret;

        if (cb->errors)
                mapping_set_error(inode->i_mapping, -EIO);

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nr_pages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        nr_pages -= 1;
                        index += 1;
                        continue;
                }
                for (i = 0; i < ret; i++) {
                        if (cb->errors)
                                SetPageError(pages[i]);
                        end_page_writeback(pages[i]);
                        put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
        }
        /* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned long index;

        if (bio->bi_status)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;

        /* ok, we're the last bio for this extent, step one is to
         * call back into the FS and do all the end_io operations
         */
        inode = cb->inode;
        cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
        btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
                        cb->start, cb->start + cb->len - 1,
                        bio->bi_status == BLK_STS_OK);
        cb->compressed_pages[0]->mapping = NULL;

        end_compressed_writeback(inode, cb);
        /* note, our inode could be gone now */

        /*
         * release the compressed pages, these came from alloc_page and
         * are not attached to the inode at all
         */
        index = 0;
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
                                 unsigned long nr_pages,
                                 unsigned int write_flags,
                                 struct cgroup_subsys_state *blkcg_css)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio *bio = NULL;
        struct compressed_bio *cb;
        unsigned long bytes_left;
        int pg_index = 0;
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
        blk_status_t ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

        WARN_ON(!PAGE_ALIGNED(start));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->start = start;
        cb->len = len;
        cb->mirror_num = 0;
        cb->compressed_pages = compressed_pages;
        cb->compressed_len = compressed_len;
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;

        bdev = fs_info->fs_devices->latest_bdev;

        bio = btrfs_bio_alloc(first_byte);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;

        if (blkcg_css) {
                bio->bi_opf |= REQ_CGROUP_PUNT;
                bio_associate_blkg_from_css(bio, blkcg_css);
        }
        refcount_set(&cb->pending_bios, 1);

        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;

                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);

                page->mapping = NULL;
                if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count. Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        refcount_inc(&cb->pending_bios);
                        ret = btrfs_bio_wq_end_io(fs_info, bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        if (!skip_sum) {
                                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                BUG_ON(ret); /* -ENOMEM */
                        }

                        ret = btrfs_map_bio(fs_info, bio, 0);
                        if (ret) {
                                bio->bi_status = ret;
                                bio_endio(bio);
                        }

                        bio = btrfs_bio_alloc(first_byte);
                        bio_set_dev(bio, bdev);
                        bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
                        btrfs_info(fs_info,
                                   "bytes left %lu compress len %lu nr %lu",
                                   bytes_left, cb->compressed_len, cb->nr_pages);
                }
                bytes_left -= PAGE_SIZE;
                first_byte += PAGE_SIZE;
                cond_resched();
        }

        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!skip_sum) {
                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, bio, 0);
        if (ret) {
                bio->bi_status = ret;
                bio_endio(bio);
        }

        return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
        struct bio_vec *last = bio_last_bvec_all(bio);

        return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb)
{
        unsigned long end_index;
        unsigned long pg_index;
        u64 last_offset;
        u64 isize = i_size_read(inode);
        int ret;
        struct page *page;
        unsigned long nr_pages = 0;
        struct extent_map *em;
        struct address_space *mapping = inode->i_mapping;
        struct extent_map_tree *em_tree;
        struct extent_io_tree *tree;
        u64 end;
        int misses = 0;

        last_offset = bio_end_offset(cb->orig_bio);
        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;

        if (isize == 0)
                return 0;

        end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        while (last_offset < compressed_end) {
                pg_index = last_offset >> PAGE_SHIFT;

                if (pg_index > end_index)
                        break;

                page = xa_load(&mapping->i_pages, pg_index);
                if (page && !xa_is_value(page)) {
                        misses++;
                        if (misses > 4)
                                break;
                        goto next;
                }

                page = __page_cache_alloc(mapping_gfp_constraint(mapping,
                                                                 ~__GFP_FS));
                if (!page)
                        break;

                if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
                        put_page(page);
                        goto next;
                }

                end = last_offset + PAGE_SIZE - 1;
                /*
                 * at this point, we have a locked page in the page cache
                 * for these bytes in the file. But, we have to make
                 * sure they map to this compressed extent on disk.
                 */
                set_page_extent_mapped(page);
                lock_extent(tree, last_offset, end);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, last_offset,
                                           PAGE_SIZE);
                read_unlock(&em_tree->lock);

                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
                free_extent_map(em);

                if (page->index == end_index) {
                        char *userpage;
                        size_t zero_offset = offset_in_page(isize);

                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
                                userpage = kmap_atomic(page);
                                memset(userpage + zero_offset, 0, zeros);
                                flush_dcache_page(page);
                                kunmap_atomic(userpage);
                        }
                }

                ret = bio_add_page(cb->orig_bio, page,
                                   PAGE_SIZE, 0);

                if (ret == PAGE_SIZE) {
                        nr_pages++;
                        put_page(page);
                } else {
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
next:
                last_offset += PAGE_SIZE;
        }
        return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it. We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        unsigned long compressed_len;
        unsigned long nr_pages;
        unsigned long pg_index;
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
        u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
        blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        u8 *sums;

        em_tree = &BTRFS_I(inode)->extent_tree;

        /* we need the actual starting offset of this extent in the file */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio_first_page_all(bio)),
                                   PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;

        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                goto out;

        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
        sums = cb->sums;

        cb->start = em->orig_start;
        em_len = em->len;
        em_start = em->start;

        free_extent_map(em);
        em = NULL;

        cb->len = bio->bi_iter.bi_size;
        cb->compressed_len = compressed_len;
        cb->compress_type = extent_compress_type(bio_flags);
        cb->orig_bio = bio;

        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
        cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
                goto fail1;

        bdev = fs_info->fs_devices->latest_bdev;

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
                                                              __GFP_HIGHMEM);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
                        ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
        faili = nr_pages - 1;
        cb->nr_pages = nr_pages;

        add_ra_bio_pages(inode, em_start + em_len, cb);

        /* include any pages we added in add_ra-bio_pages */
        cb->len = bio->bi_iter.bi_size;

        comp_bio = btrfs_bio_alloc(cur_disk_byte);
        bio_set_dev(comp_bio, bdev);
        comp_bio->bi_opf = REQ_OP_READ;
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        refcount_set(&cb->pending_bios, 1);

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                int submit = 0;

                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;

                if (comp_bio->bi_iter.bi_size)
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
                                                          comp_bio, 0);

                page->mapping = NULL;
                if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        unsigned int nr_sectors;

                        ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count. Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        refcount_inc(&cb->pending_bios);

                        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                                ret = btrfs_lookup_bio_sums(inode, comp_bio,
                                                            sums);
                                BUG_ON(ret); /* -ENOMEM */
                        }

                        nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                                  fs_info->sectorsize);
                        sums += csum_size * nr_sectors;

                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
                        if (ret) {
                                comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }

                        comp_bio = btrfs_bio_alloc(cur_disk_byte);
                        bio_set_dev(comp_bio, bdev);
                        comp_bio->bi_opf = REQ_OP_READ;
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;

                        bio_add_page(comp_bio, page, PAGE_SIZE, 0);
                }
                cur_disk_byte += PAGE_SIZE;
        }

        ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
        if (ret) {
                comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }

        return 0;

fail2:
        while (faili >= 0) {
                __free_page(cb->compressed_pages[faili]);
                faili--;
        }

        kfree(cb->compressed_pages);
fail1:
        kfree(cb);
out:
        free_extent_map(em);
        return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
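
/*
 * Worked example, not part of the original file: assuming
 * BTRFS_MAX_UNCOMPRESSED is 128K, the macro above evaluates to
 * 128K * 16 / 256 = 8192 bytes, i.e. 16 bytes read at each of up to 512
 * sampling positions, matching the 8KB bound described above.
 */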

struct bucket_item {
        u32 count;
};

struct heuristic_ws {
        /* Partial copy of input data */
        u8 *sample;
        u32 sample_size;
        /* Buckets store counters for each byte value */
        struct bucket_item *bucket;
        /* Sorting buffer */
        struct bucket_item *bucket_b;
        struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void heuristic_put_workspace(struct list_head *ws)
{
        btrfs_put_workspace(&heuristic_wsm, ws);
}

static void free_heuristic_ws(struct list_head *ws)
{
        struct heuristic_ws *workspace;

        workspace = list_entry(ws, struct heuristic_ws, list);

        kvfree(workspace->sample);
        kfree(workspace->bucket);
        kfree(workspace->bucket_b);
        kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
        struct heuristic_ws *ws;

        ws = kzalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return ERR_PTR(-ENOMEM);

        ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
        if (!ws->sample)
                goto fail;

        ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
        if (!ws->bucket)
                goto fail;

        ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
        if (!ws->bucket_b)
                goto fail;

        INIT_LIST_HEAD(&ws->list);
        return &ws->list;
fail:
        free_heuristic_ws(&ws->list);
        return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
        .workspace_manager = &heuristic_wsm,
        .put_workspace = heuristic_put_workspace,
        .alloc_workspace = alloc_heuristic_ws,
        .free_workspace = free_heuristic_ws,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        /* The heuristic is represented as compression type 0 */
        &btrfs_heuristic_compress,
        &btrfs_zlib_compress,
        &btrfs_lzo_compress,
        &btrfs_zstd_compress,
};

static void btrfs_init_workspace_manager(int type)
{
        const struct btrfs_compress_op *ops = btrfs_compress_op[type];
        struct workspace_manager *wsm = ops->workspace_manager;
        struct list_head *workspace;

        wsm->ops = ops;

        INIT_LIST_HEAD(&wsm->idle_ws);
        spin_lock_init(&wsm->ws_lock);
        atomic_set(&wsm->total_ws, 0);
        init_waitqueue_head(&wsm->ws_wait);

        /*
         * Preallocate one workspace for each compression type so we can
         * guarantee forward progress in the worst case
         */
        workspace = wsm->ops->alloc_workspace(0);
        if (IS_ERR(workspace)) {
                pr_warn(
        "BTRFS: cannot preallocate compression workspace, will try later\n");
        } else {
                atomic_set(&wsm->total_ws, 1);
                wsm->free_ws = 1;
                list_add(workspace, &wsm->idle_ws);
        }
}

static void btrfs_cleanup_workspace_manager(int type)
{
        struct workspace_manager *wsman;
        struct list_head *ws;

        wsman = btrfs_compress_op[type]->workspace_manager;
        while (!list_empty(&wsman->idle_ws)) {
                ws = wsman->idle_ws.next;
                list_del(ws);
                wsman->ops->free_workspace(ws);
                atomic_dec(&wsman->total_ws);
        }
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
Dennis Zhou7bf49942019-02-04 15:20:04 -0500983struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
984 unsigned int level)
Li Zefan261507a02010-12-17 14:21:50 +0800985{
986 struct list_head *workspace;
987 int cpus = num_online_cpus();
David Sterbafe308532017-05-31 17:14:56 +0200988 unsigned nofs_flag;
Timofey Titovets4e439a02017-09-28 17:33:36 +0300989 struct list_head *idle_ws;
990 spinlock_t *ws_lock;
991 atomic_t *total_ws;
992 wait_queue_head_t *ws_wait;
993 int *free_ws;
Li Zefan261507a02010-12-17 14:21:50 +0800994
Dennis Zhou92ee55302019-02-04 15:20:03 -0500995 idle_ws = &wsm->idle_ws;
996 ws_lock = &wsm->ws_lock;
997 total_ws = &wsm->total_ws;
998 ws_wait = &wsm->ws_wait;
999 free_ws = &wsm->free_ws;
Timofey Titovets4e439a02017-09-28 17:33:36 +03001000
Li Zefan261507a02010-12-17 14:21:50 +08001001again:
Byongho Leed9187642015-10-14 14:05:24 +09001002 spin_lock(ws_lock);
1003 if (!list_empty(idle_ws)) {
1004 workspace = idle_ws->next;
Li Zefan261507a02010-12-17 14:21:50 +08001005 list_del(workspace);
David Sterba6ac10a62016-04-27 02:15:15 +02001006 (*free_ws)--;
Byongho Leed9187642015-10-14 14:05:24 +09001007 spin_unlock(ws_lock);
Li Zefan261507a02010-12-17 14:21:50 +08001008 return workspace;
1009
1010 }
David Sterba6ac10a62016-04-27 02:15:15 +02001011 if (atomic_read(total_ws) > cpus) {
Li Zefan261507a02010-12-17 14:21:50 +08001012 DEFINE_WAIT(wait);
1013
Byongho Leed9187642015-10-14 14:05:24 +09001014 spin_unlock(ws_lock);
1015 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
David Sterba6ac10a62016-04-27 02:15:15 +02001016 if (atomic_read(total_ws) > cpus && !*free_ws)
Li Zefan261507a02010-12-17 14:21:50 +08001017 schedule();
Byongho Leed9187642015-10-14 14:05:24 +09001018 finish_wait(ws_wait, &wait);
Li Zefan261507a02010-12-17 14:21:50 +08001019 goto again;
1020 }
David Sterba6ac10a62016-04-27 02:15:15 +02001021 atomic_inc(total_ws);
Byongho Leed9187642015-10-14 14:05:24 +09001022 spin_unlock(ws_lock);
Li Zefan261507a02010-12-17 14:21:50 +08001023
David Sterbafe308532017-05-31 17:14:56 +02001024 /*
1025 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1026 * to turn it off here because we might get called from the restricted
1027 * context of btrfs_compress_bio/btrfs_compress_pages
1028 */
1029 nofs_flag = memalloc_nofs_save();
Dennis Zhou7bf49942019-02-04 15:20:04 -05001030 workspace = wsm->ops->alloc_workspace(level);
David Sterbafe308532017-05-31 17:14:56 +02001031 memalloc_nofs_restore(nofs_flag);
1032
Li Zefan261507a02010-12-17 14:21:50 +08001033 if (IS_ERR(workspace)) {
David Sterba6ac10a62016-04-27 02:15:15 +02001034 atomic_dec(total_ws);
Byongho Leed9187642015-10-14 14:05:24 +09001035 wake_up(ws_wait);
David Sterbae721e492016-04-27 02:41:17 +02001036
1037 /*
1038 * Do not return the error but go back to waiting. There's a
1039 * workspace preallocated for each type and the compression
1040 * time is bounded so we get to a workspace eventually. This
1041 * makes our caller's life easier.
David Sterba523567162016-04-27 03:07:39 +02001042 *
1043 * To prevent silent and low-probability deadlocks (when the
1044 * initial preallocation fails), check if there are any
1045 * workspaces at all.
David Sterbae721e492016-04-27 02:41:17 +02001046 */
David Sterba523567162016-04-27 03:07:39 +02001047 if (atomic_read(total_ws) == 0) {
1048 static DEFINE_RATELIMIT_STATE(_rs,
1049 /* once per minute */ 60 * HZ,
1050 /* no burst */ 1);
1051
1052 if (__ratelimit(&_rs)) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04001053 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
David Sterba523567162016-04-27 03:07:39 +02001054 }
1055 }
David Sterbae721e492016-04-27 02:41:17 +02001056 goto again;
Li Zefan261507a02010-12-17 14:21:50 +08001057 }
1058 return workspace;
1059}
1060
Dennis Zhou7bf49942019-02-04 15:20:04 -05001061static struct list_head *get_workspace(int type, int level)
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001062{
David Sterba6a0d1272019-10-04 02:36:16 +02001063 struct workspace_manager *wsm;
1064
1065 wsm = btrfs_compress_op[type]->workspace_manager;
1066 switch (type) {
1067 case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(wsm, level);
1068 case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1069 case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(wsm, level);
1070 case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1071 default:
1072 /*
1073 * This can't happen, the type is validated several times
1074 * before we get here.
1075 */
1076 BUG();
1077 }
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001078}
1079
Li Zefan261507a02010-12-17 14:21:50 +08001080/*
1081 * put a workspace struct back on the list or free it if we have enough
1082 * idle ones sitting around
1083 */
Dennis Zhou92ee55302019-02-04 15:20:03 -05001084void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
Li Zefan261507a02010-12-17 14:21:50 +08001085{
Timofey Titovets4e439a02017-09-28 17:33:36 +03001086 struct list_head *idle_ws;
1087 spinlock_t *ws_lock;
1088 atomic_t *total_ws;
1089 wait_queue_head_t *ws_wait;
1090 int *free_ws;
1091
Dennis Zhou92ee55302019-02-04 15:20:03 -05001092 idle_ws = &wsm->idle_ws;
1093 ws_lock = &wsm->ws_lock;
1094 total_ws = &wsm->total_ws;
1095 ws_wait = &wsm->ws_wait;
1096 free_ws = &wsm->free_ws;
Li Zefan261507a02010-12-17 14:21:50 +08001097
Byongho Leed9187642015-10-14 14:05:24 +09001098 spin_lock(ws_lock);
Nick Terrell26b28dc2017-06-29 10:57:26 -07001099 if (*free_ws <= num_online_cpus()) {
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001100 list_add(ws, idle_ws);
David Sterba6ac10a62016-04-27 02:15:15 +02001101 (*free_ws)++;
Byongho Leed9187642015-10-14 14:05:24 +09001102 spin_unlock(ws_lock);
Li Zefan261507a02010-12-17 14:21:50 +08001103 goto wake;
1104 }
Byongho Leed9187642015-10-14 14:05:24 +09001105 spin_unlock(ws_lock);
Li Zefan261507a02010-12-17 14:21:50 +08001106
Dennis Zhou92ee55302019-02-04 15:20:03 -05001107 wsm->ops->free_workspace(ws);
David Sterba6ac10a62016-04-27 02:15:15 +02001108 atomic_dec(total_ws);
Li Zefan261507a02010-12-17 14:21:50 +08001109wake:
David Sterba093258e2018-02-26 16:15:17 +01001110 cond_wake_up(ws_wait);
Li Zefan261507a02010-12-17 14:21:50 +08001111}
1112
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001113static void put_workspace(int type, struct list_head *ws)
1114{
Dennis Zhou92ee55302019-02-04 15:20:03 -05001115 return btrfs_compress_op[type]->put_workspace(ws);
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001116}
1117
Li Zefan261507a02010-12-17 14:21:50 +08001118/*
David Sterba38c31462017-02-14 19:04:07 +01001119 * Given an address space and start and length, compress the bytes into @pages
1120 * that are allocated on demand.
Li Zefan261507a02010-12-17 14:21:50 +08001121 *
David Sterbaf51d2b52017-09-15 17:36:57 +02001122 * @type_level is encoded algorithm and level, where level 0 means whatever
1123 * default the algorithm chooses and is opaque here;
1124 * - compression algo are 0-3
1125 * - the level are bits 4-7
1126 *
David Sterba4d3a8002017-02-14 19:04:07 +01001127 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1128 * and returns number of actually allocated pages
Li Zefan261507a02010-12-17 14:21:50 +08001129 *
David Sterba38c31462017-02-14 19:04:07 +01001130 * @total_in is used to return the number of bytes actually read. It
1131 * may be smaller than the input length if we had to exit early because we
Li Zefan261507a02010-12-17 14:21:50 +08001132 * ran out of room in the pages array or because we cross the
1133 * max_out threshold.
1134 *
David Sterba38c31462017-02-14 19:04:07 +01001135 * @total_out is an in/out parameter, must be set to the input length and will
1136 * be also used to return the total number of compressed bytes
Li Zefan261507a02010-12-17 14:21:50 +08001137 *
David Sterba38c31462017-02-14 19:04:07 +01001138 * @max_out tells us the max number of bytes that we're allowed to
Li Zefan261507a02010-12-17 14:21:50 +08001139 * stuff into pages
1140 */
David Sterbaf51d2b52017-09-15 17:36:57 +02001141int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
David Sterba38c31462017-02-14 19:04:07 +01001142 u64 start, struct page **pages,
Li Zefan261507a02010-12-17 14:21:50 +08001143 unsigned long *out_pages,
1144 unsigned long *total_in,
David Sterbae5d74902017-02-14 19:45:05 +01001145 unsigned long *total_out)
Li Zefan261507a02010-12-17 14:21:50 +08001146{
Dennis Zhou19727082019-02-04 15:19:57 -05001147 int type = btrfs_compress_type(type_level);
Dennis Zhou7bf49942019-02-04 15:20:04 -05001148 int level = btrfs_compress_level(type_level);
Li Zefan261507a02010-12-17 14:21:50 +08001149 struct list_head *workspace;
1150 int ret;
1151
David Sterbab0c1fe12019-08-09 16:49:06 +02001152 level = btrfs_compress_set_level(type, level);
Dennis Zhou7bf49942019-02-04 15:20:04 -05001153 workspace = get_workspace(type, level);
David Sterba1e4eb742019-10-02 00:06:15 +02001154 ret = compression_compress_pages(type, workspace, mapping, start, pages,
1155 out_pages, total_in, total_out);
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001156 put_workspace(type, workspace);
Li Zefan261507a02010-12-17 14:21:50 +08001157 return ret;
1158}
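/*
 * Illustration only, not part of the kernel sources: given the bit layout
 * documented above (algorithm in bits 0-3, level in bits 4-7), a caller
 * could pack a type/level pair like this.  The helper name is made up for
 * the example; the unpacking direction is what btrfs_compress_type() and
 * btrfs_compress_level() used above implement.
 */
static inline unsigned int example_pack_type_level(unsigned int type,
						   unsigned int level)
{
	return (type & 0xF) | ((level & 0xF) << 4);
}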
1159
1160/*
1161 * pages_in is an array of pages with compressed data.
1162 *
1163 * disk_start is the starting logical offset of this array in the file
1164 *
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001165 * orig_bio contains the pages from the file that we want to decompress into
Li Zefan261507a02010-12-17 14:21:50 +08001166 *
1167 * srclen is the number of bytes in pages_in
1168 *
1169 * The basic idea is that we have a bio that was created by readpages.
1170 * The pages in the bio are for the uncompressed data, and they may not
1171 * be contiguous. They all correspond to the range of bytes covered by
1172 * the compressed extent.
1173 */
Anand Jain8140dc32017-05-26 15:44:58 +08001174static int btrfs_decompress_bio(struct compressed_bio *cb)
Li Zefan261507a02010-12-17 14:21:50 +08001175{
1176 struct list_head *workspace;
1177 int ret;
Anand Jain8140dc32017-05-26 15:44:58 +08001178 int type = cb->compress_type;
Li Zefan261507a02010-12-17 14:21:50 +08001179
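	/*
	 * Note: level 0 is passed to get_workspace() here because decompression
	 * does not depend on the level the data was compressed with; any
	 * workspace of the matching type can decompress it.
	 */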
Dennis Zhou7bf49942019-02-04 15:20:04 -05001180 workspace = get_workspace(type, 0);
David Sterba1e4eb742019-10-02 00:06:15 +02001181 ret = compression_decompress_bio(type, workspace, cb);
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001182 put_workspace(type, workspace);
Anand Jaine1ddce72017-05-26 15:44:59 +08001183
Li Zefan261507a02010-12-17 14:21:50 +08001184 return ret;
1185}
1186
1187/*
1188 * A simpler decompression routine: the compressed data fits in a single
1189 * page, and we want to read a single page out of it.
1190 * start_byte tells us the offset into the uncompressed data we're interested in
1191 */
1192int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1193 unsigned long start_byte, size_t srclen, size_t destlen)
1194{
1195 struct list_head *workspace;
1196 int ret;
1197
Dennis Zhou7bf49942019-02-04 15:20:04 -05001198 workspace = get_workspace(type, 0);
David Sterba1e4eb742019-10-02 00:06:15 +02001199 ret = compression_decompress(type, workspace, data_in, dest_page,
1200 start_byte, srclen, destlen);
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001201 put_workspace(type, workspace);
Dennis Zhou7bf49942019-02-04 15:20:04 -05001202
Li Zefan261507a02010-12-17 14:21:50 +08001203 return ret;
1204}
1205
Dennis Zhou1666eda2019-02-04 15:20:01 -05001206void __init btrfs_init_compress(void)
1207{
David Sterbad5517032019-10-02 01:08:03 +02001208 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1209 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1210 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1211 zstd_init_workspace_manager();
Dennis Zhou1666eda2019-02-04 15:20:01 -05001212}
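/*
 * Note: zstd keeps its own workspace manager (see zstd.c) because its
 * workspace sizes differ per compression level; workspaces are tracked per
 * level and reclaimed when idle, unlike the single shared list used by the
 * generic manager above.
 */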
1213
David Sterbae67c7182018-02-19 17:24:18 +01001214void __cold btrfs_exit_compress(void)
Li Zefan261507a02010-12-17 14:21:50 +08001215{
David Sterba25103072019-10-02 01:08:03 +02001216 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1217 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1218 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1219 zstd_cleanup_workspace_manager();
Li Zefan261507a02010-12-17 14:21:50 +08001220}
Li Zefan3a39c182010-11-08 15:22:19 +08001221
1222/*
1223 * Copy uncompressed data from working buffer to pages.
1224 *
1225 * buf_start is the byte offset of the start of our working buffer, relative
1226 * to the start of the uncompressed data.
1227 * total_out is the offset just past the last byte currently held in the working buffer
1228 */
David Sterba14a33572017-02-14 17:58:04 +01001229int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
Li Zefan3a39c182010-11-08 15:22:19 +08001230 unsigned long total_out, u64 disk_start,
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001231 struct bio *bio)
Li Zefan3a39c182010-11-08 15:22:19 +08001232{
1233 unsigned long buf_offset;
1234 unsigned long current_buf_start;
1235 unsigned long start_byte;
Omar Sandoval6e78b3f2017-02-10 15:03:35 -08001236 unsigned long prev_start_byte;
Li Zefan3a39c182010-11-08 15:22:19 +08001237 unsigned long working_bytes = total_out - buf_start;
1238 unsigned long bytes;
1239 char *kaddr;
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001240 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
Li Zefan3a39c182010-11-08 15:22:19 +08001241
1242 /*
1243 * start byte is the first byte of the page we're currently
1244 * copying into relative to the start of the compressed data.
1245 */
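	/*
	 * Worked example (numbers made up for illustration): if the compressed
	 * extent starts at file offset 1MiB (disk_start) and the current bvec
	 * page sits at file offset 1MiB + 8KiB, start_byte is 8KiB, i.e. the
	 * page wants bytes [8KiB, 8KiB + PAGE_SIZE) of the uncompressed data.
	 * Copying below only happens while that range overlaps the window
	 * [buf_start, total_out) currently held in the working buffer.
	 */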
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001246 start_byte = page_offset(bvec.bv_page) - disk_start;
Li Zefan3a39c182010-11-08 15:22:19 +08001247
1248 /* we haven't yet hit data corresponding to this page */
1249 if (total_out <= start_byte)
1250 return 1;
1251
1252 /*
1253 * the start of the data we care about is offset into
1254 * the middle of our working buffer
1255 */
1256 if (total_out > start_byte && buf_start < start_byte) {
1257 buf_offset = start_byte - buf_start;
1258 working_bytes -= buf_offset;
1259 } else {
1260 buf_offset = 0;
1261 }
1262 current_buf_start = buf_start;
1263
1264 /* copy bytes from the working buffer into the pages */
1265 while (working_bytes > 0) {
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001266 bytes = min_t(unsigned long, bvec.bv_len,
1267 PAGE_SIZE - buf_offset);
Li Zefan3a39c182010-11-08 15:22:19 +08001268 bytes = min(bytes, working_bytes);
Li Zefan3a39c182010-11-08 15:22:19 +08001269
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001270 kaddr = kmap_atomic(bvec.bv_page);
1271 memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1272 kunmap_atomic(kaddr);
1273 flush_dcache_page(bvec.bv_page);
1274
Li Zefan3a39c182010-11-08 15:22:19 +08001275 buf_offset += bytes;
1276 working_bytes -= bytes;
1277 current_buf_start += bytes;
1278
1279 /* check if we need to pick another page */
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001280 bio_advance(bio, bytes);
1281 if (!bio->bi_iter.bi_size)
1282 return 0;
1283 bvec = bio_iter_iovec(bio, bio->bi_iter);
Omar Sandoval6e78b3f2017-02-10 15:03:35 -08001284 prev_start_byte = start_byte;
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001285 start_byte = page_offset(bvec.bv_page) - disk_start;
Li Zefan3a39c182010-11-08 15:22:19 +08001286
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001287 /*
Omar Sandoval6e78b3f2017-02-10 15:03:35 -08001288 * We need to make sure we're only adjusting
1289 * our offset into the compression working buffer when
1290 * we're switching pages. Otherwise we can incorrectly
1291 * keep copying when we were actually done.
Christoph Hellwig974b1ad2016-11-25 09:07:46 +01001292 */
Omar Sandoval6e78b3f2017-02-10 15:03:35 -08001293 if (start_byte != prev_start_byte) {
1294 /*
1295 * make sure our new page is covered by this
1296 * working buffer
1297 */
1298 if (total_out <= start_byte)
1299 return 1;
Li Zefan3a39c182010-11-08 15:22:19 +08001300
Omar Sandoval6e78b3f2017-02-10 15:03:35 -08001301 /*
1302 * the next page in the biovec might not be adjacent
1303 * to the last page, but it might still be found
1304 * inside this working buffer. bump our offset pointer
1305 */
1306 if (total_out > start_byte &&
1307 current_buf_start < start_byte) {
1308 buf_offset = start_byte - buf_start;
1309 working_bytes = total_out - start_byte;
1310 current_buf_start = buf_start + buf_offset;
1311 }
Li Zefan3a39c182010-11-08 15:22:19 +08001312 }
1313 }
1314
1315 return 1;
1316}
Timofey Titovetsc2fcdcd2017-07-17 16:52:58 +03001317
Timofey Titovets19562432017-10-08 16:11:59 +03001318/*
1319 * Shannon Entropy calculation
1320 *
Andrea Gelmini52042d82018-11-28 12:05:13 +01001321 * Pure byte distribution analysis fails to determine compressibility of data.
Timofey Titovets19562432017-10-08 16:11:59 +03001322 * Try calculating entropy to estimate the average minimum number of bits
1323 * needed to encode the sampled data.
1324 *
1325 * For convenience, return the percentage of needed bits, instead of amount of
1326 * bits directly.
1327 *
1328 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
1329 * entropy and is compressible with high probability
1330 *
1331 * @ENTROPY_LVL_HIGH - the data are not compressible with high probability
1332 *
1333 * Use of ilog2() decreases precision, so the levels are lowered by 5 to compensate.
1334 */
1335#define ENTROPY_LVL_ACEPTABLE (65)
1336#define ENTROPY_LVL_HIGH (80)
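/*
 * For reference only (not kernel code; the kernel avoids floating point):
 * a userspace sketch of the metric that the fixed-point shannon_entropy()
 * below approximates.  It assumes a 256-entry array of byte counts and the
 * total sample size, mirroring the heuristic workspace buckets.
 */
#include <math.h>

static unsigned int shannon_entropy_percent_ref(const unsigned int bucket[256],
						unsigned int sample_size)
{
	double entropy = 0.0;
	int i;

	for (i = 0; i < 256; i++) {
		double p;

		if (!bucket[i])
			continue;
		p = (double)bucket[i] / sample_size;
		entropy -= p * log2(p);		/* bits per byte */
	}
	/* Scale to a percentage of the 8 bits/byte maximum. */
	return (unsigned int)(entropy * 100.0 / 8.0);
}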
1337
1338/*
1339 * For increased precision in the shannon_entropy calculation,
1340 * let's do pow(n, M) to preserve more digits after the decimal point:
1341 *
1342 * - maximum int bit length is 64
1343 * - ilog2(MAX_SAMPLE_SIZE) -> 13
1344 * - 13 * 4 = 52 < 64 -> M = 4
1345 *
1346 * So use pow(n, 4).
1347 */
1348static inline u32 ilog2_w(u64 n)
1349{
1350 return ilog2(n * n * n * n);
1351}
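/*
 * Worked example: ilog2_w(n) approximates 4 * log2(n), i.e. the result is in
 * quarter-bit units.  For instance ilog2_w(2) = ilog2(16) = 4, which is why
 * shannon_entropy() below uses entropy_max = 8 * ilog2_w(2) = 32 as the
 * "8 bits per byte" ceiling in the same units.
 */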
1352
1353static u32 shannon_entropy(struct heuristic_ws *ws)
1354{
1355 const u32 entropy_max = 8 * ilog2_w(2);
1356 u32 entropy_sum = 0;
1357 u32 p, p_base, sz_base;
1358 u32 i;
1359
1360 sz_base = ilog2_w(ws->sample_size);
1361 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1362 p = ws->bucket[i].count;
1363 p_base = ilog2_w(p);
1364 entropy_sum += p * (sz_base - p_base);
1365 }
1366
1367 entropy_sum /= ws->sample_size;
1368 return entropy_sum * 100 / entropy_max;
1369}
1370
Timofey Titovets440c8402017-12-04 00:30:33 +03001371#define RADIX_BASE 4U
1372#define COUNTERS_SIZE (1U << RADIX_BASE)
Timofey Titovets858177d2017-09-28 17:33:41 +03001373
Timofey Titovets440c8402017-12-04 00:30:33 +03001374static u8 get4bits(u64 num, int shift) {
1375 u8 low4bits;
1376
1377 num >>= shift;
1378 /* Reverse order */
1379 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1380 return low4bits;
1381}
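/*
 * The (COUNTERS_SIZE - 1) - nibble inversion above makes the ascending radix
 * sort below effectively order the buckets by descending count, which is the
 * "reverse order" that byte_core_set_size() relies on.
 */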
1382
Timofey Titovets440c8402017-12-04 00:30:33 +03001383/*
1384 * Use 4 bits as radix base
Andrea Gelmini52042d82018-11-28 12:05:13 +01001385 * Use 16 u32 counters for calculating new position in buf array
Timofey Titovets440c8402017-12-04 00:30:33 +03001386 *
1387 * @array - array that will be sorted
1388 * @array_buf - buffer array to store sorting results
1389 * must be equal in size to @array
1390 * @num - array size
Timofey Titovets440c8402017-12-04 00:30:33 +03001391 */
David Sterba23ae8c62017-12-12 20:35:02 +01001392static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
David Sterba36243c92017-12-12 20:35:02 +01001393 int num)
Timofey Titovets440c8402017-12-04 00:30:33 +03001394{
1395 u64 max_num;
1396 u64 buf_num;
1397 u32 counters[COUNTERS_SIZE];
1398 u32 new_addr;
1399 u32 addr;
1400 int bitlen;
1401 int shift;
1402 int i;
1403
1404 /*
1405 * Try to avoid useless loop iterations for small numbers stored in big
1406 * counters. Example: 48 33 4 ... in a 64bit array
1407 */
David Sterba23ae8c62017-12-12 20:35:02 +01001408 max_num = array[0].count;
Timofey Titovets440c8402017-12-04 00:30:33 +03001409 for (i = 1; i < num; i++) {
David Sterba23ae8c62017-12-12 20:35:02 +01001410 buf_num = array[i].count;
Timofey Titovets440c8402017-12-04 00:30:33 +03001411 if (buf_num > max_num)
1412 max_num = buf_num;
1413 }
1414
1415 buf_num = ilog2(max_num);
1416 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1417
1418 shift = 0;
1419 while (shift < bitlen) {
1420 memset(counters, 0, sizeof(counters));
1421
1422 for (i = 0; i < num; i++) {
David Sterba23ae8c62017-12-12 20:35:02 +01001423 buf_num = array[i].count;
Timofey Titovets440c8402017-12-04 00:30:33 +03001424 addr = get4bits(buf_num, shift);
1425 counters[addr]++;
1426 }
1427
1428 for (i = 1; i < COUNTERS_SIZE; i++)
1429 counters[i] += counters[i - 1];
1430
1431 for (i = num - 1; i >= 0; i--) {
David Sterba23ae8c62017-12-12 20:35:02 +01001432 buf_num = array[i].count;
Timofey Titovets440c8402017-12-04 00:30:33 +03001433 addr = get4bits(buf_num, shift);
1434 counters[addr]--;
1435 new_addr = counters[addr];
David Sterba7add17b2017-12-12 20:35:02 +01001436 array_buf[new_addr] = array[i];
Timofey Titovets440c8402017-12-04 00:30:33 +03001437 }
1438
1439 shift += RADIX_BASE;
1440
1441 /*
1442 * A normal radix sort expects to move data from a temporary array to
1443 * the main one, but that requires some CPU time. Avoid that
1444 * by doing another sort iteration into the original array instead of
1445 * a memcpy()
1446 */
1447 memset(counters, 0, sizeof(counters));
1448
1449 for (i = 0; i < num; i ++) {
David Sterba23ae8c62017-12-12 20:35:02 +01001450 buf_num = array_buf[i].count;
Timofey Titovets440c8402017-12-04 00:30:33 +03001451 addr = get4bits(buf_num, shift);
1452 counters[addr]++;
1453 }
1454
1455 for (i = 1; i < COUNTERS_SIZE; i++)
1456 counters[i] += counters[i - 1];
1457
1458 for (i = num - 1; i >= 0; i--) {
David Sterba23ae8c62017-12-12 20:35:02 +01001459 buf_num = array_buf[i].count;
Timofey Titovets440c8402017-12-04 00:30:33 +03001460 addr = get4bits(buf_num, shift);
1461 counters[addr]--;
1462 new_addr = counters[addr];
David Sterba7add17b2017-12-12 20:35:02 +01001463 array[new_addr] = array_buf[i];
Timofey Titovets440c8402017-12-04 00:30:33 +03001464 }
1465
1466 shift += RADIX_BASE;
1467 }
Timofey Titovets858177d2017-09-28 17:33:41 +03001468}
1469
1470/*
1471 * Size of the core byte set - how many bytes cover 90% of the sample
1472 *
1473 * There are several types of structured binary data that use nearly all byte
1474 * values. The distribution can be uniform and counts in all buckets will be
1475 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1476 *
1477 * Another possibility is a normal (Gaussian) distribution, where the data
1478 * could be potentially compressible, but we have to take a few more steps
1479 * to decide how much.
1480 *
1481 * @BYTE_CORE_SET_LOW - the bulk of byte values repeats frequently,
1482 * a compression algorithm can easily handle that
1483 * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and are with
1484 * high probability not compressible
1485 */
1486#define BYTE_CORE_SET_LOW (64)
1487#define BYTE_CORE_SET_HIGH (200)
1488
1489static int byte_core_set_size(struct heuristic_ws *ws)
1490{
1491 u32 i;
1492 u32 coreset_sum = 0;
1493 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1494 struct bucket_item *bucket = ws->bucket;
1495
1496 /* Sort in reverse order */
David Sterba36243c92017-12-12 20:35:02 +01001497 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
Timofey Titovets858177d2017-09-28 17:33:41 +03001498
1499 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1500 coreset_sum += bucket[i].count;
1501
1502 if (coreset_sum > core_set_threshold)
1503 return i;
1504
1505 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1506 coreset_sum += bucket[i].count;
1507 if (coreset_sum > core_set_threshold)
1508 break;
1509 }
1510
1511 return i;
1512}
1513
Timofey Titovetsa288e922017-09-28 17:33:40 +03001514/*
1515 * Count byte values in buckets.
1516 * This heuristic can detect textual data (configs, xml, json, html, etc).
1517 * In most text-like data the byte set is restricted to a limited number of
1518 * possible characters, and that restriction in most cases makes the data
1519 * easy to compress.
1520 *
1521 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1522 * less - compressible
1523 * more - need additional analysis
1524 */
1525#define BYTE_SET_THRESHOLD (64)
1526
1527static u32 byte_set_size(const struct heuristic_ws *ws)
1528{
1529 u32 i;
1530 u32 byte_set_size = 0;
1531
1532 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1533 if (ws->bucket[i].count > 0)
1534 byte_set_size++;
1535 }
1536
1537 /*
1538 * Continue collecting count of byte values in buckets. If the byte
1539 * set size is bigger than the threshold, it's pointless to continue,
1540 * the detection technique would fail for this type of data.
1541 */
1542 for (; i < BUCKET_SIZE; i++) {
1543 if (ws->bucket[i].count > 0) {
1544 byte_set_size++;
1545 if (byte_set_size > BYTE_SET_THRESHOLD)
1546 return byte_set_size;
1547 }
1548 }
1549
1550 return byte_set_size;
1551}
1552
Timofey Titovets1fe4f6f2017-09-28 17:33:39 +03001553static bool sample_repeated_patterns(struct heuristic_ws *ws)
1554{
1555 const u32 half_of_sample = ws->sample_size / 2;
1556 const u8 *data = ws->sample;
1557
1558 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1559}
1560
Timofey Titovetsa440d482017-09-28 17:33:38 +03001561static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1562 struct heuristic_ws *ws)
1563{
1564 struct page *page;
1565 u64 index, index_end;
1566 u32 i, curr_sample_pos;
1567 u8 *in_data;
1568
1569 /*
1570 * Compression handles the input data in chunks of 128KiB
1571 * (defined by BTRFS_MAX_UNCOMPRESSED)
1572 *
1573 * We do the same for the heuristic and loop over the whole range.
1574 *
1575 * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic will
1576 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1577 */
1578 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1579 end = start + BTRFS_MAX_UNCOMPRESSED;
1580
1581 index = start >> PAGE_SHIFT;
1582 index_end = end >> PAGE_SHIFT;
1583
1584 /* Don't miss unaligned end */
1585 if (!IS_ALIGNED(end, PAGE_SIZE))
1586 index_end++;
1587
1588 curr_sample_pos = 0;
1589 while (index < index_end) {
1590 page = find_get_page(inode->i_mapping, index);
1591 in_data = kmap(page);
1592 /* Handle case where the start is not aligned to PAGE_SIZE */
1593 i = start % PAGE_SIZE;
1594 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1595 /* Don't sample any garbage from the last page */
1596 if (start > end - SAMPLING_READ_SIZE)
1597 break;
1598 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1599 SAMPLING_READ_SIZE);
1600 i += SAMPLING_INTERVAL;
1601 start += SAMPLING_INTERVAL;
1602 curr_sample_pos += SAMPLING_READ_SIZE;
1603 }
1604 kunmap(page);
1605 put_page(page);
1606
1607 index++;
1608 }
1609
1610 ws->sample_size = curr_sample_pos;
1611}
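/*
 * Back-of-the-envelope figure, assuming the sampling constants defined
 * earlier in this file (16 bytes taken every 256 bytes): a full 128KiB input
 * yields 128KiB / 256 * 16 = 8KiB of sample data, which is what the bucket
 * counters and the entropy math are dimensioned for.
 */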
1612
Timofey Titovetsc2fcdcd2017-07-17 16:52:58 +03001613/*
1614 * Compression heuristic.
1615 *
1616 * Try to quickly (compared to direct compression) detect data characteristics
1617 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1618 * data.
1620 *
1621 * The following types of analysis can be performed:
1622 * - detect mostly zero data
1623 * - detect data with low "byte set" size (text, etc)
1624 * - detect data with low/high "core byte" set
1625 *
1626 * Return non-zero if the compression should be done, 0 otherwise.
1627 */
1628int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1629{
Dennis Zhou7bf49942019-02-04 15:20:04 -05001630 struct list_head *ws_list = get_workspace(0, 0);
Timofey Titovets4e439a02017-09-28 17:33:36 +03001631 struct heuristic_ws *ws;
Timofey Titovetsa440d482017-09-28 17:33:38 +03001632 u32 i;
1633 u8 byte;
Timofey Titovets19562432017-10-08 16:11:59 +03001634 int ret = 0;
Timofey Titovetsc2fcdcd2017-07-17 16:52:58 +03001635
Timofey Titovets4e439a02017-09-28 17:33:36 +03001636 ws = list_entry(ws_list, struct heuristic_ws, list);
1637
Timofey Titovetsa440d482017-09-28 17:33:38 +03001638 heuristic_collect_sample(inode, start, end, ws);
1639
Timofey Titovets1fe4f6f2017-09-28 17:33:39 +03001640 if (sample_repeated_patterns(ws)) {
1641 ret = 1;
1642 goto out;
1643 }
1644
Timofey Titovetsa440d482017-09-28 17:33:38 +03001645 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1646
1647 for (i = 0; i < ws->sample_size; i++) {
1648 byte = ws->sample[i];
1649 ws->bucket[byte].count++;
Timofey Titovetsc2fcdcd2017-07-17 16:52:58 +03001650 }
1651
Timofey Titovetsa288e922017-09-28 17:33:40 +03001652 i = byte_set_size(ws);
1653 if (i < BYTE_SET_THRESHOLD) {
1654 ret = 2;
1655 goto out;
1656 }
1657
Timofey Titovets858177d2017-09-28 17:33:41 +03001658 i = byte_core_set_size(ws);
1659 if (i <= BYTE_CORE_SET_LOW) {
1660 ret = 3;
1661 goto out;
1662 }
1663
1664 if (i >= BYTE_CORE_SET_HIGH) {
1665 ret = 0;
1666 goto out;
1667 }
1668
Timofey Titovets19562432017-10-08 16:11:59 +03001669 i = shannon_entropy(ws);
1670 if (i <= ENTROPY_LVL_ACEPTABLE) {
1671 ret = 4;
1672 goto out;
1673 }
1674
1675 /*
1676 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1677 * needed to give green light to compression.
1678 *
1679 * For now just assume that compression at that level is not worth the
1680 * resources because:
1681 *
1682 * 1. it is possible to defrag the data later
1683 *
1684 * 2. the data would turn out to be hardly compressible, eg. 150 distinct
1685 * byte values, each bucket with a count around ~54. The heuristic would
1686 * be confused. This can happen when the data have some internal repeated
1687 * patterns like "abbacbbc...". This can be detected by analyzing
1688 * pairs of bytes, which is too costly.
1689 */
1690 if (i < ENTROPY_LVL_HIGH) {
1691 ret = 5;
1692 goto out;
1693 } else {
1694 ret = 0;
1695 goto out;
1696 }
1697
Timofey Titovets1fe4f6f2017-09-28 17:33:39 +03001698out:
Dennis Zhou929f4ba2019-02-04 15:20:02 -05001699 put_workspace(0, ws_list);
Timofey Titovetsc2fcdcd2017-07-17 16:52:58 +03001700 return ret;
1701}
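/*
 * Sketch of a caller, for illustration only (the function name is made up;
 * the real policy in inode.c also honours mount options and per-inode
 * flags): any non-zero return value means "worth trying to compress", the
 * exact value only records which test made the decision.
 */
static bool example_should_compress(struct inode *inode, u64 start, u64 end)
{
	return btrfs_compress_heuristic(inode, start, end) != 0;
}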
David Sterbaf51d2b52017-09-15 17:36:57 +02001702
Dennis Zhoud0ab62c2019-02-04 15:20:05 -05001703/*
1704 * Convert the compression suffix (eg. after "zlib" starting with ":") to
1705 * level; an unrecognized string sets the default level
1706 */
1707unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
David Sterbaf51d2b52017-09-15 17:36:57 +02001708{
Dennis Zhoud0ab62c2019-02-04 15:20:05 -05001709 unsigned int level = 0;
1710 int ret;
1711
1712 if (!type)
David Sterbaf51d2b52017-09-15 17:36:57 +02001713 return 0;
1714
Dennis Zhoud0ab62c2019-02-04 15:20:05 -05001715 if (str[0] == ':') {
1716 ret = kstrtouint(str + 1, 10, &level);
1717 if (ret)
1718 level = 0;
1719 }
David Sterbaf51d2b52017-09-15 17:36:57 +02001720
David Sterbab0c1fe12019-08-09 16:49:06 +02001721 level = btrfs_compress_set_level(type, level);
1722
1723 return level;
1724}
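/*
 * Illustrative examples: for a "compress=zlib:9" mount option the caller
 * passes the ":9" suffix, so btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB,
 * ":9") returns 9, while a suffix like ":banana" (or no suffix at all)
 * falls back to the zlib default via btrfs_compress_set_level().
 */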
1725
1726/*
1727 * Adjust @level according to the limits of the compression algorithm or
1728 * fall back to the default
1729 */
1730unsigned int btrfs_compress_set_level(int type, unsigned level)
1731{
1732 const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1733
1734 if (level == 0)
1735 level = ops->default_level;
1736 else
1737 level = min(level, ops->max_level);
Dennis Zhoud0ab62c2019-02-04 15:20:05 -05001738
1739 return level;
David Sterbaf51d2b52017-09-15 17:36:57 +02001740}
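/*
 * Example, assuming zlib's limits in its btrfs_compress_op (max_level 9,
 * default_level 3): btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 0) returns
 * 3, and btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 12) is clamped to 9.
 */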