/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
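
/*
 * Illustrative sizing (editorial note, not from the original source): with
 * the default crc32c checksum (csum_size == 4) and a 4K sectorsize, a 128K
 * compressed extent needs 128K / 4K = 32 checksums, so the allocation above
 * is sizeof(struct compressed_bio) plus 32 * 4 = 128 bytes of inline csums;
 * the trailing cb->sums member serves as the start of that inline array.
 */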

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed; just skip the checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
285/*
286 * worker function to build and submit bios for previously compressed pages.
287 * The corresponding pages in the inode should be marked for writeback
288 * and the compressed pages should have a reference on them for dropping
289 * when the IO is complete.
290 *
291 * This also checksums the file bytes and gets things ready for
292 * the end io hooks.
293 */
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200294blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
Chris Masonc8b97812008-10-29 14:49:59 -0400295 unsigned long len, u64 disk_start,
296 unsigned long compressed_len,
297 struct page **compressed_pages,
Liu Bof82b7352017-10-23 23:18:16 -0600298 unsigned long nr_pages,
299 unsigned int write_flags)
Chris Masonc8b97812008-10-29 14:49:59 -0400300{
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400301 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Chris Masonc8b97812008-10-29 14:49:59 -0400302 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -0400303 struct compressed_bio *cb;
304 unsigned long bytes_left;
305 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
David Sterba306e16c2011-04-19 14:29:38 +0200306 int pg_index = 0;
Chris Masonc8b97812008-10-29 14:49:59 -0400307 struct page *page;
308 u64 first_byte = disk_start;
309 struct block_device *bdev;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200310 blk_status_t ret;
Li Zefane55179b2011-07-14 03:16:47 +0000311 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
Chris Masonc8b97812008-10-29 14:49:59 -0400312
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300313 WARN_ON(start & ((u64)PAGE_SIZE - 1));
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400314 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
Yoshinori Sanodac97e52011-02-15 12:01:42 +0000315 if (!cb)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200316 return BLK_STS_RESOURCE;
Elena Reshetovaa50299a2017-03-03 10:55:20 +0200317 refcount_set(&cb->pending_bios, 0);
Chris Masonc8b97812008-10-29 14:49:59 -0400318 cb->errors = 0;
319 cb->inode = inode;
320 cb->start = start;
321 cb->len = len;
Chris Masond20f7042008-12-08 16:58:54 -0500322 cb->mirror_num = 0;
Chris Masonc8b97812008-10-29 14:49:59 -0400323 cb->compressed_pages = compressed_pages;
324 cb->compressed_len = compressed_len;
325 cb->orig_bio = NULL;
326 cb->nr_pages = nr_pages;
327
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400328 bdev = fs_info->fs_devices->latest_bdev;
Chris Masonc8b97812008-10-29 14:49:59 -0400329
David Sterbac821e7f32017-06-02 18:35:36 +0200330 bio = btrfs_bio_alloc(bdev, first_byte);
Liu Bof82b7352017-10-23 23:18:16 -0600331 bio->bi_opf = REQ_OP_WRITE | write_flags;
Chris Masonc8b97812008-10-29 14:49:59 -0400332 bio->bi_private = cb;
333 bio->bi_end_io = end_compressed_bio_write;
Elena Reshetovaa50299a2017-03-03 10:55:20 +0200334 refcount_set(&cb->pending_bios, 1);
Chris Masonc8b97812008-10-29 14:49:59 -0400335
336 /* create and submit bios for the compressed pages */
337 bytes_left = compressed_len;
David Sterba306e16c2011-04-19 14:29:38 +0200338 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200339 int submit = 0;
340
David Sterba306e16c2011-04-19 14:29:38 +0200341 page = compressed_pages[pg_index];
Chris Masonc8b97812008-10-29 14:49:59 -0400342 page->mapping = inode->i_mapping;
Kent Overstreet4f024f32013-10-11 15:44:27 -0700343 if (bio->bi_iter.bi_size)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200344 submit = io_tree->ops->merge_bio_hook(page, 0,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300345 PAGE_SIZE,
Chris Masonc8b97812008-10-29 14:49:59 -0400346 bio, 0);
Chris Masonc8b97812008-10-29 14:49:59 -0400347
Chris Mason70b99e62008-10-31 12:46:39 -0400348 page->mapping = NULL;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200349 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300350 PAGE_SIZE) {
Chris Masonc8b97812008-10-29 14:49:59 -0400351 bio_get(bio);
352
Chris Masonaf09abf2008-11-07 12:35:44 -0500353 /*
354 * inc the count before we submit the bio so
355 * we know the end IO handler won't happen before
356 * we inc the count. Otherwise, the cb might get
357 * freed before we're done setting it up
358 */
Elena Reshetovaa50299a2017-03-03 10:55:20 +0200359 refcount_inc(&cb->pending_bios);
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400360 ret = btrfs_bio_wq_end_io(fs_info, bio,
361 BTRFS_WQ_ENDIO_DATA);
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100362 BUG_ON(ret); /* -ENOMEM */
Chris Masonc8b97812008-10-29 14:49:59 -0400363
Li Zefane55179b2011-07-14 03:16:47 +0000364 if (!skip_sum) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400365 ret = btrfs_csum_one_bio(inode, bio, start, 1);
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100366 BUG_ON(ret); /* -ENOMEM */
Li Zefane55179b2011-07-14 03:16:47 +0000367 }
Chris Masond20f7042008-12-08 16:58:54 -0500368
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400369 ret = btrfs_map_bio(fs_info, bio, 0, 1);
Liu Bof5daf2c2016-06-22 18:32:06 -0700370 if (ret) {
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200371 bio->bi_status = ret;
Liu Bof5daf2c2016-06-22 18:32:06 -0700372 bio_endio(bio);
373 }
Chris Masonc8b97812008-10-29 14:49:59 -0400374
375 bio_put(bio);
376
David Sterbac821e7f32017-06-02 18:35:36 +0200377 bio = btrfs_bio_alloc(bdev, first_byte);
Liu Bof82b7352017-10-23 23:18:16 -0600378 bio->bi_opf = REQ_OP_WRITE | write_flags;
Chris Masonc8b97812008-10-29 14:49:59 -0400379 bio->bi_private = cb;
380 bio->bi_end_io = end_compressed_bio_write;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300381 bio_add_page(bio, page, PAGE_SIZE, 0);
Chris Masonc8b97812008-10-29 14:49:59 -0400382 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300383 if (bytes_left < PAGE_SIZE) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400384 btrfs_info(fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -0500385 "bytes left %lu compress len %lu nr %lu",
Chris Masoncfbc2462008-10-30 13:22:14 -0400386 bytes_left, cb->compressed_len, cb->nr_pages);
387 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300388 bytes_left -= PAGE_SIZE;
389 first_byte += PAGE_SIZE;
Chris Mason771ed682008-11-06 22:02:51 -0500390 cond_resched();
Chris Masonc8b97812008-10-29 14:49:59 -0400391 }
392 bio_get(bio);
393
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400394 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100395 BUG_ON(ret); /* -ENOMEM */
Chris Masonc8b97812008-10-29 14:49:59 -0400396
Li Zefane55179b2011-07-14 03:16:47 +0000397 if (!skip_sum) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400398 ret = btrfs_csum_one_bio(inode, bio, start, 1);
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100399 BUG_ON(ret); /* -ENOMEM */
Li Zefane55179b2011-07-14 03:16:47 +0000400 }
Chris Masond20f7042008-12-08 16:58:54 -0500401
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400402 ret = btrfs_map_bio(fs_info, bio, 0, 1);
Liu Bof5daf2c2016-06-22 18:32:06 -0700403 if (ret) {
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200404 bio->bi_status = ret;
Liu Bof5daf2c2016-06-22 18:32:06 -0700405 bio_endio(bio);
406 }
Chris Masonc8b97812008-10-29 14:49:59 -0400407
408 bio_put(bio);
409 return 0;
410}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file. But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it. We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
					  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
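
/*
 * Worked example (editorial note, not from the original source): with
 * BTRFS_MAX_UNCOMPRESSED at 128K, the bound is 128K * 16 / 256 = 8192 bytes,
 * i.e. the 8KB sample mentioned above (up to 512 samples of 16 bytes each).
 */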

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is encoded in bits 0-3
 * - the level is encoded in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * also be used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
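
/*
 * Editorial example (an assumption based on the encoding described above,
 * not part of the original source): zlib at level 9 would be passed as
 * type_level = BTRFS_COMPRESS_ZLIB | (9 << 4), i.e. 0x91; a level of 0 in
 * bits 4-7 lets the algorithm pick its own default.
 */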

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous. They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine. Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}
Li Zefan3a39c182010-11-08 15:22:19 +08001131
1132/*
1133 * Copy uncompressed data from working buffer to pages.
1134 *
1135 * buf_start is the byte offset we're of the start of our workspace buffer.
1136 *
1137 * total_out is the last byte of the buffer
1138 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start_byte is the first byte of the page we're currently
	 * copying into, relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into the compression working buffer when
		 * we're switching pages.  Otherwise we could incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
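
/*
 * Example (an illustrative sketch, not the actual zlib/lzo implementation):
 * a per-algorithm decompress_bio routine typically inflates compressed input
 * into a workspace buffer one chunk at a time and then lets the helper above
 * distribute it into the destination bio, stopping once every page is filled.
 * 'ws_buf', 'buf_start', 'total_out', 'disk_start' and 'orig_bio' stand in
 * for the implementation's own bookkeeping:
 *
 *	while (compressed input remains) {
 *		... inflate the next chunk into ws_buf, updating total_out ...
 *		ret = btrfs_decompress_buf2page(ws_buf, buf_start, total_out,
 *						disk_start, orig_bio);
 *		if (ret == 0)
 *			break;		(all destination pages are filled)
 *		buf_start = total_out;
 *	}
 */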

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits instead of the
 * number of bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below this threshold the sample has low byte
 *			    entropy and is most likely compressible
 *
 * @ENTROPY_LVL_HIGH - at or above this threshold the data is most likely not
 *		       compressible
 *
 * Use of ilog2() decreases precision, so both levels are lowered by 5 to
 * compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in the shannon_entropy calculation, do pow(n, M)
 * to preserve more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
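
/*
 * Worked example of the fixed-point trick: ilog2_w(n) = ilog2(n^4), which is
 * floor(4 * log2(n)), ie. log2(n) expressed in quarter-bit units.  With the
 * 8KiB maximum sample implied by ilog2(MAX_SAMPLE_SIZE) == 13 above:
 *
 *	ilog2_w(8192) = ilog2(8192^4) = ilog2(2^52) = 52  (= 4 * 13)
 *	ilog2_w(2)    = ilog2(16)     = 4                 (= 4 * 1)
 *
 * so the intermediate values stay comfortably within 64 bits.
 */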

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
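
/*
 * Two boundary cases, in the quarter-bit scale from ilog2_w() (entropy_max is
 * 8 * ilog2_w(2) == 32, ie. 8 bits per byte):
 *
 * - all sampled bytes equal: one bucket holds the whole sample, so
 *   p_base == sz_base, entropy_sum stays 0 and the function returns 0%.
 *
 * - uniformly random bytes: each of the 256 buckets holds sample_size / 256,
 *   so sz_base - p_base == ilog2_w(256) == 32 for every bucket,
 *   entropy_sum / sample_size == 32 and the function returns 100%.
 */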

/* Compare buckets by count so that sort() orders them largest first */
static int bucket_comp_rev(const void *lv, const void *rv)
{
	const struct bucket_item *l = (const struct bucket_item *)lv;
	const struct bucket_item *r = (const struct bucket_item *)rv;

	return r->count - l->count;
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Another possibility is a normal (Gaussian) distribution, where the data
 * could be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW  - the main part of the byte values repeats frequently,
 *			 a compression algorithm can easily handle that
 * @BYTE_CORE_SET_HIGH - the distribution is close to uniform and the data is
 *			 most likely not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in descending order of count */
	sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
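
/*
 * Worked example: for uniformly distributed data (eg. encrypted or already
 * compressed), covering 90% of the sample takes roughly 90% of the 256
 * buckets, ie. about 230 >= BYTE_CORE_SET_HIGH, so the caller gives up on
 * compression.  If the 64 most frequent byte values already cover more than
 * 90% of the sample, the first loop alone satisfies the threshold and the
 * function returns 64 == BYTE_CORE_SET_LOW, which the caller treats as
 * "likely compressible".
 */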

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * In most text-like data the byte set is restricted to a limited number of
 * possible characters, and that restriction usually makes the data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - likely compressible
 *	more - needs additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting the count of byte values in buckets.  If the
	 * byte set size is bigger than the threshold, it's pointless to
	 * continue, the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}
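
/*
 * Worked example: text-like data with a restricted alphabet (lowercase prose,
 * numeric logs, simple configs) may use fewer than 64 distinct byte values,
 * so byte_set_size() stays below BYTE_SET_THRESHOLD and the caller can
 * declare the data compressible without further analysis.  A JPEG or an
 * encrypted blob uses essentially all 256 values; the second loop then bails
 * out as soon as the count exceeds the threshold and the decision falls
 * through to the core-set and entropy checks.
 */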

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
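
/*
 * Example: a range full of zeroes (or any sample whose second half repeats
 * its first half, such as a small block pattern duplicated across the range)
 * makes the two halves compare equal, so the heuristic can report
 * "compressible" immediately, before building the bucket histogram.
 */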

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data in chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under the assumption that the
	 * heuristic will process no more than BTRFS_MAX_UNCOMPRESSED at
	 * a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss an unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle the case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
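
/*
 * Sampling arithmetic (assuming the SAMPLING_READ_SIZE == 16 and
 * SAMPLING_INTERVAL == 256 values defined near the top of this file): a full
 * 128KiB input range is visited in 512 steps of 256 bytes, each contributing
 * a 16 byte snippet, so curr_sample_pos ends at 512 * 16 = 8KiB.  That is the
 * MAX_SAMPLE_SIZE assumed by the ilog2_w() comment above (ilog2(8192) == 13).
 */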

/*
 * Compression heuristic.
 *
 * The goal is to detect, much more quickly than by compressing the data
 * outright, whether the data is worth compressing, so that CPU time is not
 * wasted on incompressible data.
 *
 * The following types of analysis are performed:
 * - detect mostly zero data
 * - detect data with a low "byte set" size (text, etc)
 * - detect data with a low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For entropy levels below ENTROPY_LVL_HIGH, additional analysis
	 * would be needed to give the green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150
	 *    distinct byte values where every bucket has a count of ~54.  The
	 *    heuristic would be confused.  This can happen when the data has
	 *    internal repeated patterns like "abbacbbc...".  Detecting that
	 *    would require analyzing pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}
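
/*
 * The non-zero return values encode which test fired: 1 repeated pattern,
 * 2 small byte set, 3 small core byte set, 4 low entropy, 5 medium entropy;
 * 0 means "do not compress".  A sketch of the intended use (callers such as
 * the write path decide per range whether to compress; 'inode', 'start' and
 * 'end' describe the range about to be written):
 *
 *	if (btrfs_compress_heuristic(inode, start, end))
 *		(go ahead and compress the range)
 *	else
 *		(write the range uncompressed)
 */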

unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}
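
/*
 * Examples, assuming BTRFS_ZLIB_DEFAULT_LEVEL is the fallback level defined
 * in the btrfs headers:
 *
 *	btrfs_compress_str2level("zlib:7")   returns 7
 *	btrfs_compress_str2level("zlib")     returns BTRFS_ZLIB_DEFAULT_LEVEL
 *	btrfs_compress_str2level("zlib:42")  returns BTRFS_ZLIB_DEFAULT_LEVEL
 *	                                     (anything after the single digit
 *	                                      is not accepted)
 *	btrfs_compress_str2level("lzo")      returns 0
 */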