blob: 5c2a83876220caa4be6d97f7692084ebad33b315 [file] [log] [blame]
Gao Xiang29b24f62019-07-31 23:57:31 +08001/* SPDX-License-Identifier: GPL-2.0-only */
2/*
Gao Xiangbfb86742018-07-26 20:21:45 +08003 * Copyright (C) 2017-2018 HUAWEI, Inc.
Alexander A. Klimov592e7cd2020-07-13 15:09:44 +02004 * https://www.huawei.com/
Gao Xiangc5aa9032021-08-20 18:00:19 +08005 * Copyright (C) 2021, Alibaba Cloud
Gao Xiangbfb86742018-07-26 20:21:45 +08006 */
Gao Xiang14f362b2019-07-31 23:57:36 +08007#ifndef __EROFS_INTERNAL_H
8#define __EROFS_INTERNAL_H
Gao Xiangbfb86742018-07-26 20:21:45 +08009
10#include <linux/fs.h>
11#include <linux/dcache.h>
12#include <linux/mm.h>
13#include <linux/pagemap.h>
14#include <linux/bio.h>
15#include <linux/buffer_head.h>
Gao Xiang47e49372019-08-23 05:36:59 +080016#include <linux/magic.h>
Gao Xiangbfb86742018-07-26 20:21:45 +080017#include <linux/slab.h>
18#include <linux/vmalloc.h>
Gao Xiangeadcd6b2021-08-13 13:29:31 +080019#include <linux/iomap.h>
Gao Xiangbfb86742018-07-26 20:21:45 +080020#include "erofs_fs.h"
21
/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

/* message helpers; the wrappers below append "\n" and pass __func__ */
__printf(3, 4) void _erofs_err(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)	\
	_erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
__printf(3, 4) void _erofs_info(struct super_block *sb,
				const char *function, const char *fmt, ...);
#define erofs_info(sb, fmt, ...) \
	_erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define erofs_dbg(x, ...)	pr_debug(x "\n", ##__VA_ARGS__)
#define DBG_BUGON		BUG_ON
#else
#define erofs_dbg(x, ...)	((void)0)
/* still evaluate the condition for side effects, but never BUG() */
#define DBG_BUGON(x)		((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;
Gao Xiangbfb86742018-07-26 20:21:45 +080049
/* per-device info of a (multi-device) filesystem */
struct erofs_device_info {
	char *path;			/* device path from the on-disk device table */
	struct block_device *bdev;
	struct dax_device *dax_dev;
	u64 dax_part_off;

	u32 blocks;			/* NOTE(review): presumably # of fs blocks on this device — confirm against device table */
	u32 mapped_blkaddr;		/* starting blkaddr this device is mapped at */
};

struct erofs_mount_opts {
#ifdef CONFIG_EROFS_FS_ZIP
	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;
	/* strategy of sync decompression (false - auto, true - force on) */
	bool readahead_sync_decompress;

	/* threshold for decompression synchronously */
	unsigned int max_sync_decompress_pages;
#endif
	unsigned int mount_opt;		/* EROFS_MOUNT_* bit flags */
};

/* extra block devices of a multi-device filesystem, indexed by device id */
struct erofs_dev_context {
	struct idr tree;
	struct rw_semaphore rwsem;	/* NOTE(review): presumably serializes tree updates vs. lookups */

	unsigned int extra_devices;
};

/* context carried through mount/remount option parsing */
struct erofs_fs_context {
	struct erofs_mount_opts opt;
	struct erofs_dev_context *devs;
};

/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
	/* # of pages needed for EROFS lz4 rolling decompression */
	u16 max_distance_pages;
	/* maximum possible blocks for pclusters in the filesystem */
	u16 max_pclusterblks;
};
92
/* in-memory superblock info, hung off super_block->s_fs_info */
struct erofs_sb_info {
	struct erofs_mount_opts opt;	/* options */
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray arranged in physical block number */
	struct xarray managed_pslots;

	unsigned int shrinker_run_no;
	u16 available_compr_algs;	/* NOTE(review): presumably a bitmask of on-disk compression algorithms */

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;

	struct erofs_sb_lz4_info lz4;
#endif	/* CONFIG_EROFS_FS_ZIP */
	struct erofs_dev_context *devs;	/* extra devices (multi-device fs) */
	struct dax_device *dax_dev;
	u64 dax_part_off;
	u64 total_blocks;		/* blocks across all devices */
	u32 primarydevice_blocks;

	u32 meta_blkaddr;		/* start block of the metadata (inode) area */
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif
	u16 device_id_mask;		/* valid bits of device id to be used */

	/* inode slot unit size in bit shift */
	unsigned char islotbits;

	u32 sb_size;			/* total superblock size */
	u32 build_time_nsec;
	u64 build_time;

	/* what we really care is nid, rather than ino.. */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];			/* 128-bit uuid for volume */
	u8 volume_name[16];		/* volume name */
	u32 feature_compat;		/* on-disk compat feature bits */
	u32 feature_incompat;		/* on-disk incompat feature bits */
};
140
/* fetch erofs_sb_info from a VFS super_block / inode */
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_DAX_ALWAYS		0x00000040
#define EROFS_MOUNT_DAX_NEVER		0x00000080

#define clear_opt(opt, option)	((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option)	((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option)	((opt)->mount_opt & EROFS_MOUNT_##option)

/* values for erofs_mount_opts.cache_strategy */
enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};
159
#ifdef CONFIG_EROFS_FS_ZIP
/* sentinel refcount value marking a workgroup as frozen (bit-locked) */
#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#if defined(CONFIG_SMP)
/*
 * Try to freeze the workgroup: atomically swap refcount from @val to
 * EROFS_LOCKED_MAGIC.  Fails (returns false) if the refcount changed
 * concurrently.  Preemption stays disabled on success until unfreeze.
 */
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

/* restore the refcount saved at freeze time and re-enable preemption */
static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

/* spin until the workgroup is unfrozen; returns the refcount observed */
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroup is never freezed on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif	/* !CONFIG_SMP */
#endif	/* !CONFIG_EROFS_FS_ZIP */
Gao Xiange7e9a302018-07-26 20:22:05 +0800230
Gao Xiangbfb86742018-07-26 20:21:45 +0800231/* we strictly follow PAGE_SIZE and no buffer head yet */
232#define LOG_BLOCK_SIZE PAGE_SHIFT
233
234#undef LOG_SECTORS_PER_BLOCK
235#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
236
237#undef SECTORS_PER_BLOCK
238#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK)
239
240#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
241
242#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
243#error erofs cannot be used in this platform
244#endif
245
#define ROOT_NID(sb)		((sb)->root_nid)

/* byte address <-> fs block number conversion helpers */
#define erofs_blknr(addr)	((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)	((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)	((erofs_off_t)(nr) * EROFS_BLKSIZ)

/* byte offset of on-disk inode @nid inside the metadata area */
static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
256
/* generate erofs_sb_has_xxx() helpers testing superblock feature bits */
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
{ \
	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}

EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)

/* atomic flag definitions (bit numbers in erofs_inode.flags) */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)
Gao Xiangbfb86742018-07-26 20:21:45 +0800276
/* in-memory inode, embedding the VFS inode */
struct erofs_inode {
	erofs_nid_t nid;		/* erofs inode number */

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;	/* on-disk data layout type */
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	/* layout-specific fields; which member is valid depends on datalayout */
	union {
		erofs_blk_t raw_blkaddr;
		struct {
			unsigned short chunkformat;
			unsigned char chunkbits;
		};
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char z_algorithmtype[2];
			unsigned char z_logical_clusterbits;
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_I(ptr)	\
	container_of(ptr, struct erofs_inode, vfs_inode)
Gao Xiangbfb86742018-07-26 20:21:45 +0800310
Gao Xiang99634bf2019-09-04 10:09:05 +0800311static inline unsigned long erofs_inode_datablocks(struct inode *inode)
Gao Xiangbfb86742018-07-26 20:21:45 +0800312{
313 /* since i_size cannot be changed */
314 return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
315}
316
/* extract @bits bits of @value starting at bit position @bit */
static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
					  unsigned int bits)
{
	const unsigned int mask = (1 << bits) - 1;

	return (value >> bit) & mask;
}
323
Gao Xiang8a765682019-09-04 10:08:54 +0800324
/* extract the inode format version field (see erofs_fs.h bit layout) */
static inline unsigned int erofs_inode_version(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_VERSION_BIT,
			      EROFS_I_VERSION_BITS);
}

/* extract the data layout field (see erofs_fs.h bit layout) */
static inline unsigned int erofs_inode_datalayout(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
			      EROFS_I_DATALAYOUT_BITS);
}
336
/*
 * Different from grab_cache_page_nowait(), reclaiming is never triggered
 * when allocating new pages (__GFP_RECLAIM is masked out of the gfp flags,
 * so the allocation fails fast instead of entering direct reclaim).
 */
static inline
struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
					  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
}
349
extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
extern const struct address_space_operations z_erofs_aops;

/*
 * Logical to physical block mapping
 *
 * Different from other file systems, it is used for 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed ondisk block rather than the uncompressed
 * in-memory block for the compressed file.
 *
 * m_pofs equals to m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, it has no difference with
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair, and get
 * the needed m_pblk, m_pofs, m_len to get the compressed data
 * and the updated m_lblk, m_lofs which indicates the start
 * of the corresponding uncompressed data in the file.
 */
enum {
	BH_Encoded = BH_PrivateStart,
	BH_FullMapped,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent is encoded */
#define EROFS_MAP_ENCODED	(1 << BH_Encoded)
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	(1 << BH_FullMapped)

/* result of a logical-to-physical extent lookup */
struct erofs_map_blocks {
	erofs_off_t m_pa, m_la;		/* physical / logical byte addresses */
	u64 m_plen, m_llen;		/* physical / logical extent lengths */

	unsigned short m_deviceid;
	char m_algorithmformat;
	unsigned int m_flags;		/* EROFS_MAP_* flags */

	struct page *mpage;		/* NOTE(review): presumably a metadata page kept across iterations */
};

/* Flags used by erofs_map_blocks_flatmode() */
#define EROFS_GET_BLOCKS_RAW	0x0001
/*
 * Used to get the exact decompressed length, e.g. fiemap (consider lookback
 * approach instead if possible since it's more metadata lightweight.)
 */
#define EROFS_GET_BLOCKS_FIEMAP	0x0002
/* Used to map the whole extent if non-negligible data is requested for LZMA */
#define EROFS_GET_BLOCKS_READMORE	0x0004

/* runtime-only compression formats appended after the on-disk ones */
enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};
419
/* zmap.c */
extern const struct iomap_ops z_erofs_iomap_report_ops;

#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_fill_inode(struct inode *inode);
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);
#else
/* stubs: compressed inodes are unsupported without CONFIG_EROFS_FS_ZIP */
static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
static inline int z_erofs_map_blocks_iter(struct inode *inode,
					  struct erofs_map_blocks *map,
					  int flags)
{
	return -EOPNOTSUPP;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* resolved target device and physical address of a mapped extent */
struct erofs_map_dev {
	struct block_device *m_bdev;
	struct dax_device *m_daxdev;
	u64 m_dax_part_off;

	erofs_off_t m_pa;
	unsigned int m_deviceid;
};
446
/* data.c */
extern const struct file_operations erofs_file_fops;
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
/* translate dev->m_deviceid/m_pa into the backing bdev/daxdev */
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len);
Gao Xiang6e789012018-08-21 22:49:30 +0800453
/* inode.c */
/* fold a 64-bit nid into an unsigned long hash value */
static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	/* XOR the halves so 32-bit longs keep entropy from both */
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}
463
extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags);

/* namei.c */
extern const struct inode_operations erofs_dir_iops;

/* look up @name in @dir; fills *nid and dirent *d_type on success */
int erofs_namei(struct inode *dir, struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

/* dir.c */
extern const struct file_operations erofs_dir_fops;
481
/*
 * Map @count pages contiguously into vmalloc space, retrying up to three
 * times; vm_unmap_aliases() is called between attempts to purge lazily
 * freed vmap areas that may be causing vm_map_ram() to fail.
 * Returns the mapped address, or NULL after the final failed attempt.
 * (The unreachable trailing "return NULL;" after while(1) was removed.)
 */
static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int retried = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1);

		/* retry two more times (totally 3 times) */
		if (p || ++retried >= 3)
			return p;
		vm_unmap_aliases();
	}
}
496
/* pcpubuf.c */
void *erofs_get_pcpubuf(unsigned int requiredpages);
void erofs_put_pcpubuf(void *ptr);
int erofs_pcpubuf_growsize(unsigned int nrpages);
void erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);

/* utils.c / zdata.c */
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
/*
 * Push @page onto the singly-linked page pool: the link to the previous
 * head is stashed in page->private and *pagepool becomes the new head.
 */
static inline void erofs_pagepool_add(struct page **pagepool,
				      struct page *page)
{
	set_page_private(page, (unsigned long)*pagepool);
	*pagepool = page;
}
void erofs_release_pages(struct page **pagepool);
Gao Xiangfa61a332019-06-24 15:22:53 +0800513
#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index);
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct page *page);
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int len);
#else
/* no-op stubs so callers need no #ifdefs when EROFS_FS_ZIP is disabled */
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
static inline int z_erofs_load_lz4_config(struct super_block *sb,
				  struct erofs_super_block *dsb,
				  struct z_erofs_lz4_cfgs *lz4, int len)
{
	/* any on-disk lz4 configuration is invalid if lz4 is compiled out */
	if (lz4 || dsb->u1.lz4_max_distance) {
		erofs_err(sb, "lz4 algorithm isn't enabled");
		return -EINVAL;
	}
	return 0;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */
Gao Xiang22fe04a2019-07-31 23:57:39 +0800550#endif /* !CONFIG_EROFS_FS_ZIP */
Gao Xiang2e1d6632019-01-16 16:59:56 +0800551
Gao Xiang622cead2021-10-11 05:31:45 +0800552#ifdef CONFIG_EROFS_FS_ZIP_LZMA
553int z_erofs_lzma_init(void);
554void z_erofs_lzma_exit(void);
555int z_erofs_load_lzma_config(struct super_block *sb,
556 struct erofs_super_block *dsb,
557 struct z_erofs_lzma_cfgs *lzma, int size);
558#else
559static inline int z_erofs_lzma_init(void) { return 0; }
560static inline int z_erofs_lzma_exit(void) { return 0; }
561static inline int z_erofs_load_lzma_config(struct super_block *sb,
562 struct erofs_super_block *dsb,
563 struct z_erofs_lzma_cfgs *lzma, int size) {
564 if (lzma) {
565 erofs_err(sb, "lzma algorithm isn't enabled");
566 return -EINVAL;
567 }
568 return 0;
569}
570#endif /* !CONFIG_EROFS_FS_ZIP */
571
Gao Xianga6b9b1d2019-08-14 18:37:03 +0800572#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
573
Gao Xiang14f362b2019-07-31 23:57:36 +0800574#endif /* __EROFS_INTERNAL_H */