// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/*
 * if the inode is successfully read, return the mapped address of its
 * metadata block (or sometimes of the inode payload block if it's an
 * extended inode) in order to fill inline data if possible.
 */
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

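	/* an inode may not start at a block boundary: record its block and in-block offset */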
	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

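	/* i_format encodes both the on-disk inode version and the data layout */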
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = EROFS_BLKSIZ - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
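			/*
			 * the inode spans two metadata blocks: copy the bytes
			 * in this block first, then map the next block and
			 * copy the remainder
			 */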
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
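		/* drop the temporary cross-block copy, if any (kfree(NULL) is a no-op) */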
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
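		/* chunk size = block size << (chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK) */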
		vi->chunkbits = LOG_BLOCK_SIZE +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
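	/* only one timestamp is recorded on disk (or the build time for compact inodes); mirror it */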
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
		inode->i_flags |= S_DAX;
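	/* nblks is nonzero only for compressed inodes (their on-disk block count) */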
	if (!nblks)
		/* measure inode.i_blocks like generic filesystems do */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}

static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= EROFS_BLKSIZ) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

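	/* fast symlink: keep a NUL-terminated in-memory copy of the inline target */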
	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

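	/* the inline target follows the inode base and its xattr ibody in the same block */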
	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a block boundary */
	if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

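	/* compressed inodes are finished by z_erofs_fill_inode(); others use the raw access aops */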
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * an erofs nid is 64 bits wide, but i_ino is an 'unsigned long'; therefore
 * extra work is needed on 32-bit platforms to find the right inode.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

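	/* hash the 64-bit nid into an unsigned long key; the test actor checks the full nid */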
	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
		erofs_iget_set_actor, &nid);
}

struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid,
			 bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (!inode)
		return ERR_PTR(-ENOMEM);

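	/* only freshly allocated (I_NEW) inodes need to be filled from disk */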
	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode, isdir);
		if (!err)
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(&init_user_ns, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};