blob: 56eef0821e3e31d6680515ed7393d2cdf65b9002 [file] [log] [blame]
David Sterba9888c342018-04-03 19:16:55 +02001/* SPDX-License-Identifier: GPL-2.0 */
Chris Masonc8b97812008-10-29 14:49:59 -04002/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
Chris Masonc8b97812008-10-29 14:49:59 -04004 */
5
David Sterba9888c342018-04-03 19:16:55 +02006#ifndef BTRFS_COMPRESSION_H
7#define BTRFS_COMPRESSION_H
Chris Masonc8b97812008-10-29 14:49:59 -04008
Qu Wenruod5c1d682018-05-17 13:52:22 +08009#include <linux/sizes.h>
10
Nikolay Borisovc7ee1812020-06-03 08:55:16 +030011struct btrfs_inode;
12
/*
 * We want to make sure that the amount of RAM required to uncompress an
 * extent is reasonable, so we limit the total size in RAM of a compressed
 * extent to 128k.  This is a crucial number because it also controls how
 * easily we can spread reads across CPUs for decompression.
 *
 * We also want to make sure the amount of IO required to do a random read
 * is reasonably small, so we limit the size of a compressed extent to 128k.
 */
22
23/* Maximum length of compressed data stored on disk */
24#define BTRFS_MAX_COMPRESSED (SZ_128K)
25/* Maximum size of data before compression */
26#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
27
Qu Wenruoeae8d822017-11-06 10:43:18 +080028#define BTRFS_ZLIB_DEFAULT_LEVEL 3
29
/*
 * Per-bio state for one compressed read or write: the pages holding the
 * compressed data, the uncompressed byte range of the inode they map to,
 * and completion/checksum bookkeeping.  For reads, checksums for the
 * decompressed data are appended in the trailing flexible array.
 */
struct compressed_bio {
	/* Number of sectors with unfinished IO (unsubmitted or unfinished) */
	refcount_t pending_sectors;

	/* Number of compressed pages in the array */
	unsigned int nr_pages;

	/* The pages with the compressed data on them */
	struct page **compressed_pages;

	/* Inode that owns this data */
	struct inode *inode;

	/* Starting offset in the inode for our pages */
	u64 start;

	/* Number of bytes in the inode we're working on (uncompressed length) */
	unsigned int len;

	/* Number of bytes on disk (compressed length) */
	unsigned int compressed_len;

	/* The compression algorithm for this bio */
	u8 compress_type;

	/* IO errors (non-zero on failure) */
	u8 errors;
	/* Mirror to read from / that was read; used for retry on read errors */
	int mirror_num;

	/* For reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * The start of a variable length array of checksums only
	 * used by reads
	 */
	u8 sums[];
};
68
/*
 * Extract the compression algorithm from a combined type/level value.
 * The algorithm identifier occupies the low 4 bits.
 */
static inline unsigned int btrfs_compress_type(unsigned int type_level)
{
	return type_level % 16;
}
73
/*
 * Extract the compression level from a combined type/level value.
 * The level occupies bits 4-7.
 */
static inline unsigned int btrfs_compress_level(unsigned int type_level)
{
	return (type_level >> 4) & 0xF;
}
78
Liu Bof5c29bd2017-11-02 17:21:50 -060079void __init btrfs_init_compress(void);
David Sterbae67c7182018-02-19 17:24:18 +010080void __cold btrfs_exit_compress(void);
Li Zefan261507a02010-12-17 14:21:50 +080081
David Sterbaf51d2b52017-09-15 17:36:57 +020082int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
David Sterba38c31462017-02-14 19:04:07 +010083 u64 start, struct page **pages,
Li Zefan261507a02010-12-17 14:21:50 +080084 unsigned long *out_pages,
85 unsigned long *total_in,
David Sterbae5d74902017-02-14 19:45:05 +010086 unsigned long *total_out);
Li Zefan261507a02010-12-17 14:21:50 +080087int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
88 unsigned long start_byte, size_t srclen, size_t destlen);
Qu Wenruo1c3dc172021-07-05 10:00:58 +080089int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
90 struct compressed_bio *cb, u32 decompressed);
Li Zefan261507a02010-12-17 14:21:50 +080091
Nikolay Borisovc7ee1812020-06-03 08:55:16 +030092blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
Anand Jain65b53552021-05-29 17:48:35 +080093 unsigned int len, u64 disk_start,
94 unsigned int compressed_len,
Chris Masonc8b97812008-10-29 14:49:59 -040095 struct page **compressed_pages,
Anand Jain65b53552021-05-29 17:48:35 +080096 unsigned int nr_pages,
Chris Masonec39f762019-07-10 12:28:17 -070097 unsigned int write_flags,
98 struct cgroup_subsys_state *blkcg_css);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +020099blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
Chris Masonc8b97812008-10-29 14:49:59 -0400100 int mirror_num, unsigned long bio_flags);
Anand Jainebb87652016-03-10 17:26:59 +0800101
Dennis Zhoud0ab62c2019-02-04 15:20:05 -0500102unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
David Sterbaf51d2b52017-09-15 17:36:57 +0200103
/*
 * Compression algorithm identifiers.
 *
 * NOTE(review): these values appear to be persisted (extent metadata) and
 * are used to index the per-type workspace managers and compress ops — do
 * not renumber existing entries; confirm against the on-disk format before
 * adding a type.
 */
enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE  = 0,
	BTRFS_COMPRESS_ZLIB  = 1,
	BTRFS_COMPRESS_LZO   = 2,
	BTRFS_COMPRESS_ZSTD  = 3,
	BTRFS_NR_COMPRESS_TYPES = 4,
};
111
/*
 * Pool of preallocated compression workspaces for one algorithm: a list of
 * idle workspaces protected by a spinlock, with a waitqueue for callers
 * that must sleep until one is freed.
 */
struct workspace_manager {
	/* List of workspaces not currently in use */
	struct list_head idle_ws;
	/* Protects idle_ws and free_ws */
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};
122
David Sterba5907a9b2019-10-04 02:50:28 +0200123struct list_head *btrfs_get_workspace(int type, unsigned int level);
David Sterbaa3bbd2a2019-10-04 02:50:28 +0200124void btrfs_put_workspace(int type, struct list_head *ws);
Dennis Zhou92ee55302019-02-04 15:20:03 -0500125
/*
 * Per-algorithm descriptor: the workspace pool for the algorithm and the
 * range of compression levels it supports.
 */
struct btrfs_compress_op {
	/* Workspace pool used by this algorithm */
	struct workspace_manager *workspace_manager;
	/* Maximum level supported by the compression algorithm */
	unsigned int max_level;
	/* Level used when the caller does not specify one */
	unsigned int default_level;
};
132
Dennis Zhouca4ac362019-02-04 15:19:59 -0500133/* The heuristic workspaces are managed via the 0th workspace manager */
Chengguang Xuce96b7f2019-10-10 15:59:57 +0800134#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
Dennis Zhouca4ac362019-02-04 15:19:59 -0500135
136extern const struct btrfs_compress_op btrfs_heuristic_compress;
David Sterbae8c9f182015-01-02 18:23:10 +0100137extern const struct btrfs_compress_op btrfs_zlib_compress;
138extern const struct btrfs_compress_op btrfs_lzo_compress;
Nick Terrell5c1aab12017-08-09 19:39:02 -0700139extern const struct btrfs_compress_op btrfs_zstd_compress;
Li Zefan261507a02010-12-17 14:21:50 +0800140
David Sterbae128f9c2017-10-31 17:24:26 +0100141const char* btrfs_compress_type2str(enum btrfs_compression_type type);
Johannes Thumshirnaa53e3b2019-06-06 12:07:15 +0200142bool btrfs_compress_is_valid_type(const char *str, size_t len);
David Sterbae128f9c2017-10-31 17:24:26 +0100143
Timofey Titovetsc2fcdcd2017-07-17 16:52:58 +0300144int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
145
David Sterbacb4c9192020-08-17 10:58:38 +0200146int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
147 u64 start, struct page **pages, unsigned long *out_pages,
148 unsigned long *total_in, unsigned long *total_out);
149int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
150int zlib_decompress(struct list_head *ws, unsigned char *data_in,
151 struct page *dest_page, unsigned long start_byte, size_t srclen,
152 size_t destlen);
153struct list_head *zlib_alloc_workspace(unsigned int level);
154void zlib_free_workspace(struct list_head *ws);
155struct list_head *zlib_get_workspace(unsigned int level);
156
157int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
158 u64 start, struct page **pages, unsigned long *out_pages,
159 unsigned long *total_in, unsigned long *total_out);
160int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
161int lzo_decompress(struct list_head *ws, unsigned char *data_in,
162 struct page *dest_page, unsigned long start_byte, size_t srclen,
163 size_t destlen);
164struct list_head *lzo_alloc_workspace(unsigned int level);
165void lzo_free_workspace(struct list_head *ws);
166
167int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
168 u64 start, struct page **pages, unsigned long *out_pages,
169 unsigned long *total_in, unsigned long *total_out);
170int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
171int zstd_decompress(struct list_head *ws, unsigned char *data_in,
172 struct page *dest_page, unsigned long start_byte, size_t srclen,
173 size_t destlen);
174void zstd_init_workspace_manager(void);
175void zstd_cleanup_workspace_manager(void);
176struct list_head *zstd_alloc_workspace(unsigned int level);
177void zstd_free_workspace(struct list_head *ws);
178struct list_head *zstd_get_workspace(unsigned int level);
179void zstd_put_workspace(struct list_head *ws);
180
Chris Masonc8b97812008-10-29 14:49:59 -0400181#endif