blob: 8e5cd9c916ff1e27208a9cca5511efeec8aaa012 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/xattr.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Portions of this code from linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 * Red Hat Inc.
 */
18#include <linux/rwsem.h>
19#include <linux/f2fs_fs.h>
Jaegeuk Kim8ae8f162013-06-03 19:46:19 +090020#include <linux/security.h>
Christoph Hellwiga6dda0e2013-12-20 05:16:45 -080021#include <linux/posix_acl_xattr.h>
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090022#include "f2fs.h"
23#include "xattr.h"
Chao Yu955ebcd2019-07-22 17:57:05 +080024#include "segment.h"
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090025
Chao Yua9991502020-02-25 18:17:10 +080026static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
27{
28 if (likely(size == sbi->inline_xattr_slab_size)) {
29 *is_inline = true;
Chao Yu32410572021-08-09 08:24:48 +080030 return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab,
31 GFP_F2FS_ZERO, false, sbi);
Chao Yua9991502020-02-25 18:17:10 +080032 }
33 *is_inline = false;
34 return f2fs_kzalloc(sbi, size, GFP_NOFS);
35}
36
37static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr,
38 bool is_inline)
39{
40 if (is_inline)
41 kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
42 else
Chao Yuc8eb7022020-09-14 16:47:00 +080043 kfree(xattr_addr);
Chao Yua9991502020-02-25 18:17:10 +080044}
45
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +020046static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
Al Virob2968212016-04-10 20:48:24 -040047 struct dentry *unused, struct inode *inode,
48 const char *name, void *buffer, size_t size)
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090049{
Al Virob2968212016-04-10 20:48:24 -040050 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090051
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +020052 switch (handler->flags) {
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090053 case F2FS_XATTR_INDEX_USER:
54 if (!test_opt(sbi, XATTR_USER))
55 return -EOPNOTSUPP;
56 break;
57 case F2FS_XATTR_INDEX_TRUSTED:
Jaegeuk Kim8ae8f162013-06-03 19:46:19 +090058 case F2FS_XATTR_INDEX_SECURITY:
59 break;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090060 default:
61 return -EINVAL;
62 }
Al Virob2968212016-04-10 20:48:24 -040063 return f2fs_getxattr(inode, handler->flags, name,
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +020064 buffer, size, NULL);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090065}
66
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +020067static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
Christian Braunere65ce2a2021-01-21 14:19:27 +010068 struct user_namespace *mnt_userns,
Al Viro59301222016-05-27 10:19:30 -040069 struct dentry *unused, struct inode *inode,
70 const char *name, const void *value,
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +020071 size_t size, int flags)
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090072{
Al Viro59301222016-05-27 10:19:30 -040073 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090074
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +020075 switch (handler->flags) {
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090076 case F2FS_XATTR_INDEX_USER:
77 if (!test_opt(sbi, XATTR_USER))
78 return -EOPNOTSUPP;
79 break;
80 case F2FS_XATTR_INDEX_TRUSTED:
Jaegeuk Kim8ae8f162013-06-03 19:46:19 +090081 case F2FS_XATTR_INDEX_SECURITY:
82 break;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090083 default:
84 return -EINVAL;
85 }
Al Viro59301222016-05-27 10:19:30 -040086 return f2fs_setxattr(inode, handler->flags, name,
Jaegeuk Kimc02745e2014-04-23 12:23:14 +090087 value, size, NULL, flags);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +090088}
89
Andreas Gruenbacher764a5c62015-12-02 14:44:43 +010090static bool f2fs_xattr_user_list(struct dentry *dentry)
Jaegeuk Kim573ea5f2012-11-30 17:32:08 +090091{
Andreas Gruenbacher764a5c62015-12-02 14:44:43 +010092 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
Jaegeuk Kim573ea5f2012-11-30 17:32:08 +090093
Andreas Gruenbacher764a5c62015-12-02 14:44:43 +010094 return test_opt(sbi, XATTR_USER);
95}
96
/* trusted.* xattrs are only listed for privileged (CAP_SYS_ADMIN) tasks */
static bool f2fs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
101
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +0200102static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
Al Virob2968212016-04-10 20:48:24 -0400103 struct dentry *unused, struct inode *inode,
104 const char *name, void *buffer, size_t size)
Jaegeuk Kim573ea5f2012-11-30 17:32:08 +0900105{
Chao Yu84e97c22015-03-23 10:36:15 +0800106 if (buffer)
107 *((char *)buffer) = F2FS_I(inode)->i_advise;
Jaegeuk Kim573ea5f2012-11-30 17:32:08 +0900108 return sizeof(char);
109}
110
/*
 * system.advise ->set: update the modifiable bits of the inode's
 * i_advise byte.  Only the file owner or a capable task may change it,
 * and the new value must not carry bits outside FADVISE_MODIFIABLE_BITS.
 * Marks the inode dirty so the change is persisted.
 */
static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
		struct user_namespace *mnt_userns,
		struct dentry *unused, struct inode *inode,
		const char *name, const void *value,
		size_t size, int flags)
{
	unsigned char old_advise = F2FS_I(inode)->i_advise;
	unsigned char new_advise;

	/*
	 * NOTE(review): the ownership check uses &init_user_ns rather than
	 * the passed mnt_userns — confirm idmapped-mount behavior is the
	 * intended one for this kernel version.
	 */
	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EPERM;
	if (value == NULL)
		return -EINVAL;

	new_advise = *(char *)value;
	if (new_advise & ~FADVISE_MODIFIABLE_BITS)
		return -EINVAL;

	/* merge: take modifiable bits from the new value, keep the rest */
	new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
	new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;

	F2FS_I(inode)->i_advise = new_advise;
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
136
Jaegeuk Kim8ae8f162013-06-03 19:46:19 +0900137#ifdef CONFIG_F2FS_FS_SECURITY
138static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
139 void *page)
140{
141 const struct xattr *xattr;
142 int err = 0;
143
144 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
Jaegeuk Kimd631abd2014-06-01 23:24:30 +0900145 err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
Jaegeuk Kim8ae8f162013-06-03 19:46:19 +0900146 xattr->name, xattr->value,
Jaegeuk Kimc02745e2014-04-23 12:23:14 +0900147 xattr->value_len, (struct page *)page, 0);
Jaegeuk Kim8ae8f162013-06-03 19:46:19 +0900148 if (err < 0)
149 break;
150 }
151 return err;
152}
153
/*
 * Ask the active LSMs for the initial security xattrs of a newly created
 * inode and write them through f2fs_initxattrs().  @ipage, when non-NULL,
 * is the already-held inode page so the xattrs can be stored without
 * re-fetching it.
 */
int f2fs_init_security(struct inode *inode, struct inode *dir,
				const struct qstr *qstr, struct page *ipage)
{
	return security_inode_init_security(inode, dir, qstr,
				&f2fs_initxattrs, ipage);
}
160#endif
161
/* "user." namespace: visibility gated by the user_xattr mount option */
const struct xattr_handler f2fs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= F2FS_XATTR_INDEX_USER,
	.list	= f2fs_xattr_user_list,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};
169
/* "trusted." namespace: listed only for CAP_SYS_ADMIN tasks */
const struct xattr_handler f2fs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= F2FS_XATTR_INDEX_TRUSTED,
	.list	= f2fs_xattr_trusted_list,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};
177
/* f2fs-specific "system.advise" attribute backed by the inode's i_advise */
const struct xattr_handler f2fs_xattr_advise_handler = {
	.name	= F2FS_SYSTEM_ADVISE_NAME,
	.flags	= F2FS_XATTR_INDEX_ADVISE,
	.get	= f2fs_xattr_advise_get,
	.set	= f2fs_xattr_advise_set,
};
184
/* "security." namespace used by LSMs; always listed (no ->list callback) */
const struct xattr_handler f2fs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= F2FS_XATTR_INDEX_SECURITY,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};
191
/*
 * Map from the on-disk e_name_index of an xattr entry to its handler.
 * Index 0 is unused; entries for compiled-out features stay NULL, so
 * lookups through f2fs_xattr_handler() must tolerate NULL results.
 */
static const struct xattr_handler *f2fs_xattr_handler_map[] = {
	[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	[F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
	[F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
	[F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler,
#endif
	[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};
204
/*
 * NULL-terminated handler list registered with the VFS (sb->s_xattr);
 * it drives the generic getxattr/setxattr/listxattr dispatch.
 */
const struct xattr_handler *f2fs_xattr_handlers[] = {
	&f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
	&f2fs_xattr_security_handler,
#endif
	&f2fs_xattr_advise_handler,
	NULL,
};
218
Jaegeuk Kime1123262014-04-23 12:17:25 +0900219static inline const struct xattr_handler *f2fs_xattr_handler(int index)
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900220{
221 const struct xattr_handler *handler = NULL;
222
Jaegeuk Kime1123262014-04-23 12:17:25 +0900223 if (index > 0 && index < ARRAY_SIZE(f2fs_xattr_handler_map))
224 handler = f2fs_xattr_handler_map[index];
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900225 return handler;
226}
227
/*
 * Walk the xattr entry list starting at @base_addr looking for the
 * (@index, @name) pair.
 *
 * Returns the matching entry, or the list-terminating entry when the
 * name is absent (callers distinguish the two via IS_XATTR_LAST_ENTRY()).
 * Returns NULL when an entry would extend past @last_base_addr — i.e.
 * on-disk corruption; in that case the overrunning position is stored in
 * *last_addr when the caller supplied one.
 */
static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
				void *last_base_addr, void **last_addr,
				int index, size_t len, const char *name)
{
	struct f2fs_xattr_entry *entry;

	list_for_each_xattr(entry, base_addr) {
		/* validate entry bounds before dereferencing its fields */
		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
			if (last_addr)
				*last_addr = entry;
			return NULL;
		}

		if (entry->e_name_index != index)
			continue;
		if (entry->e_name_len != len)
			continue;
		if (!memcmp(entry->e_name, name, len))
			break;
	}
	return entry;
}
251
Chao Yu6afc6622017-09-06 21:59:50 +0800252static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
253 void *base_addr, void **last_addr, int index,
254 size_t len, const char *name)
Chao Yuba38c272017-01-24 20:39:51 +0800255{
256 struct f2fs_xattr_entry *entry;
Chao Yu6afc6622017-09-06 21:59:50 +0800257 unsigned int inline_size = inline_xattr_size(inode);
Chao Yu2c28aba2019-03-05 19:32:26 +0800258 void *max_addr = base_addr + inline_size;
Chao Yuba38c272017-01-24 20:39:51 +0800259
Chao Yudd9d4a32021-12-12 17:16:56 +0800260 entry = __find_xattr(base_addr, max_addr, last_addr, index, len, name);
261 if (!entry)
262 return NULL;
Chao Yu2c28aba2019-03-05 19:32:26 +0800263
264 /* inline xattr header or entry across max inline xattr size */
265 if (IS_XATTR_LAST_ENTRY(entry) &&
266 (void *)entry + sizeof(__u32) > max_addr) {
267 *last_addr = entry;
268 return NULL;
269 }
Chao Yuba38c272017-01-24 20:39:51 +0800270 return entry;
271}
272
Chao Yua5f433f2017-09-04 18:58:02 +0800273static int read_inline_xattr(struct inode *inode, struct page *ipage,
274 void *txattr_addr)
275{
276 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
277 unsigned int inline_size = inline_xattr_size(inode);
278 struct page *page = NULL;
279 void *inline_addr;
280
281 if (ipage) {
Chao Yu6afc6622017-09-06 21:59:50 +0800282 inline_addr = inline_xattr_addr(inode, ipage);
Chao Yua5f433f2017-09-04 18:58:02 +0800283 } else {
Chao Yu4d57b862018-05-30 00:20:41 +0800284 page = f2fs_get_node_page(sbi, inode->i_ino);
Chao Yua5f433f2017-09-04 18:58:02 +0800285 if (IS_ERR(page))
286 return PTR_ERR(page);
287
Chao Yu6afc6622017-09-06 21:59:50 +0800288 inline_addr = inline_xattr_addr(inode, page);
Chao Yua5f433f2017-09-04 18:58:02 +0800289 }
290 memcpy(txattr_addr, inline_addr, inline_size);
291 f2fs_put_page(page, 1);
292
293 return 0;
294}
295
Chao Yu63840692017-09-04 18:58:03 +0800296static int read_xattr_block(struct inode *inode, void *txattr_addr)
297{
298 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
299 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
300 unsigned int inline_size = inline_xattr_size(inode);
301 struct page *xpage;
302 void *xattr_addr;
303
304 /* The inode already has an extended attribute block. */
Chao Yu4d57b862018-05-30 00:20:41 +0800305 xpage = f2fs_get_node_page(sbi, xnid);
Chao Yu63840692017-09-04 18:58:03 +0800306 if (IS_ERR(xpage))
307 return PTR_ERR(xpage);
308
309 xattr_addr = page_address(xpage);
310 memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
311 f2fs_put_page(xpage, 1);
312
313 return 0;
314}
315
/*
 * Find the xattr entry (@index, @name) for f2fs_getxattr().
 *
 * Reads the inline xattr area and (if present) the external xattr node
 * block into one freshly allocated buffer, then searches it.  On success
 * returns 0 with *xe pointing into *base_addr, which the caller must
 * release with xattr_free() (passing back *is_inline).  Returns -ENODATA
 * when the attribute does not exist and -EFSCORRUPTED when the entry
 * list overruns the xattr area.
 */
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
				unsigned int index, unsigned int len,
				const char *name, struct f2fs_xattr_entry **xe,
				void **base_addr, int *base_size,
				bool *is_inline)
{
	void *cur_addr, *txattr_addr, *last_txattr_addr;
	void *last_addr = NULL;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	unsigned int inline_size = inline_xattr_size(inode);
	int err;

	/* neither inline area nor xattr node block: nothing stored at all */
	if (!xnid && !inline_size)
		return -ENODATA;

	*base_size = XATTR_SIZE(inode) + XATTR_PADDING_SIZE;
	txattr_addr = xattr_alloc(F2FS_I_SB(inode), *base_size, is_inline);
	if (!txattr_addr)
		return -ENOMEM;

	/* walking bound for entries; excludes the trailing padding */
	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(inode);

	/* read from inline xattr */
	if (inline_size) {
		err = read_inline_xattr(inode, ipage, txattr_addr);
		if (err)
			goto out;

		*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
						index, len, name);
		if (*xe) {
			/* found inline: cap base_size for the copy-out check */
			*base_size = inline_size;
			goto check;
		}
	}

	/* read from xattr node block */
	if (xnid) {
		err = read_xattr_block(inode, txattr_addr);
		if (err)
			goto out;
	}

	/*
	 * If the inline walk stopped at last_addr, resume there:
	 * XATTR_HDR(last_addr) - 1 rebases the list so iteration starts
	 * exactly at last_addr.  Otherwise scan from the beginning.
	 */
	if (last_addr)
		cur_addr = XATTR_HDR(last_addr) - 1;
	else
		cur_addr = txattr_addr;

	*xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
	if (!*xe) {
		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
								inode->i_ino);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
		err = -EFSCORRUPTED;
		goto out;
	}
check:
	/* hitting the list terminator means the name was not found */
	if (IS_XATTR_LAST_ENTRY(*xe)) {
		err = -ENODATA;
		goto out;
	}

	*base_addr = txattr_addr;
	return 0;
out:
	xattr_free(F2FS_I_SB(inode), txattr_addr, *is_inline);
	return err;
}
384
/*
 * Read the inode's complete xattr image (inline area followed by the
 * external block) into one zeroed buffer, initializing the header if
 * the inode never had xattrs allocated.  On success the caller owns
 * *base_addr and must kfree() it.
 */
static int read_all_xattrs(struct inode *inode, struct page *ipage,
				void **base_addr)
{
	struct f2fs_xattr_header *header;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	unsigned int size = VALID_XATTR_BLOCK_SIZE;
	unsigned int inline_size = inline_xattr_size(inode);
	void *txattr_addr;
	int err;

	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
			inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
	if (!txattr_addr)
		return -ENOMEM;

	/* read from inline xattr */
	if (inline_size) {
		err = read_inline_xattr(inode, ipage, txattr_addr);
		if (err)
			goto fail;
	}

	/* read from xattr node block */
	if (xnid) {
		err = read_xattr_block(inode, txattr_addr);
		if (err)
			goto fail;
	}

	header = XATTR_HDR(txattr_addr);

	/* never been allocated xattrs: stamp a fresh header in the buffer */
	if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
		header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
		header->h_refcount = cpu_to_le32(1);
	}
	*base_addr = txattr_addr;
	return 0;
fail:
	kfree(txattr_addr);
	return err;
}
427
/*
 * Write back a modified xattr image: the first inline_size bytes of
 * @txattr_addr go into the inode's inline area, the rest into the xattr
 * node block.  Allocates a new xattr node when @hsize no longer fits
 * inline, and truncates the node when everything fits inline again.
 * @ipage, if non-NULL, is the caller-held inode page.
 */
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
			void *txattr_addr, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t inline_size = inline_xattr_size(inode);
	struct page *in_page = NULL;
	void *xattr_addr;
	void *inline_addr = NULL;
	struct page *xpage;
	nid_t new_nid = 0;
	int err = 0;

	/* reserve a nid up front if a new xattr node will be needed */
	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
		if (!f2fs_alloc_nid(sbi, &new_nid))
			return -ENOSPC;

	/* write to inline xattr */
	if (inline_size) {
		if (ipage) {
			inline_addr = inline_xattr_addr(inode, ipage);
		} else {
			in_page = f2fs_get_node_page(sbi, inode->i_ino);
			if (IS_ERR(in_page)) {
				f2fs_alloc_nid_failed(sbi, new_nid);
				return PTR_ERR(in_page);
			}
			inline_addr = inline_xattr_addr(inode, in_page);
		}

		f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
							NODE, true, true);
		/* no need to use xattr node block */
		if (hsize <= inline_size) {
			err = f2fs_truncate_xattr_node(inode);
			f2fs_alloc_nid_failed(sbi, new_nid);
			if (err) {
				f2fs_put_page(in_page, 1);
				return err;
			}
			memcpy(inline_addr, txattr_addr, inline_size);
			set_page_dirty(ipage ? ipage : in_page);
			goto in_page_out;
		}
	}

	/* write to xattr node block */
	if (F2FS_I(inode)->i_xattr_nid) {
		xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
		if (IS_ERR(xpage)) {
			err = PTR_ERR(xpage);
			f2fs_alloc_nid_failed(sbi, new_nid);
			goto in_page_out;
		}
		/* a nid must not have been reserved if a node already exists */
		f2fs_bug_on(sbi, new_nid);
		f2fs_wait_on_page_writeback(xpage, NODE, true, true);
	} else {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
		xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
		if (IS_ERR(xpage)) {
			err = PTR_ERR(xpage);
			f2fs_alloc_nid_failed(sbi, new_nid);
			goto in_page_out;
		}
		f2fs_alloc_nid_done(sbi, new_nid);
	}
	xattr_addr = page_address(xpage);

	if (inline_size)
		memcpy(inline_addr, txattr_addr, inline_size);
	memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);

	if (inline_size)
		set_page_dirty(ipage ? ipage : in_page);
	set_page_dirty(xpage);

	f2fs_put_page(xpage, 1);
in_page_out:
	f2fs_put_page(in_page, 1);
	return err;
}
510
/*
 * Read the value of xattr (@index, @name) into @buffer.  With a NULL
 * buffer, returns the value size.  @ipage, if non-NULL, is the held
 * inode page so the inline area need not be re-read.  Lookup runs under
 * i_xattr_sem (read side).  Returns the value size on success, -ENODATA
 * when absent, -ERANGE when the name or buffer is too small/large.
 */
int f2fs_getxattr(struct inode *inode, int index, const char *name,
		void *buffer, size_t buffer_size, struct page *ipage)
{
	struct f2fs_xattr_entry *entry = NULL;
	int error;
	unsigned int size, len;
	void *base_addr = NULL;
	int base_size;
	bool is_inline;

	if (name == NULL)
		return -EINVAL;

	len = strlen(name);
	if (len > F2FS_NAME_LEN)
		return -ERANGE;

	down_read(&F2FS_I(inode)->i_xattr_sem);
	error = lookup_all_xattrs(inode, ipage, index, len, name,
				&entry, &base_addr, &base_size, &is_inline);
	up_read(&F2FS_I(inode)->i_xattr_sem);
	if (error)
		return error;

	size = le16_to_cpu(entry->e_value_size);

	if (buffer && size > buffer_size) {
		error = -ERANGE;
		goto out;
	}

	if (buffer) {
		char *pval = entry->e_name + entry->e_name_len;

		/* don't copy past the buffer lookup_all_xattrs() gave us */
		if (base_size - (pval - (char *)base_addr) < size) {
			error = -ERANGE;
			goto out;
		}
		memcpy(buffer, pval, size);
	}
	error = size;
out:
	xattr_free(F2FS_I_SB(inode), base_addr, is_inline);
	return error;
}
556
/*
 * List all visible xattr names of @dentry's inode into @buffer as
 * NUL-separated "prefix.name" strings.  With a NULL buffer, returns the
 * size required.  Entries whose handler is compiled out or whose
 * ->list() callback rejects them (e.g. user.* without the user_xattr
 * option) are skipped.  Returns -EFSCORRUPTED on a malformed entry list.
 */
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_xattr_entry *entry;
	void *base_addr, *last_base_addr;
	int error;
	size_t rest = buffer_size;

	down_read(&F2FS_I(inode)->i_xattr_sem);
	error = read_all_xattrs(inode, NULL, &base_addr);
	up_read(&F2FS_I(inode)->i_xattr_sem);
	if (error)
		return error;

	last_base_addr = (void *)base_addr + XATTR_SIZE(inode);

	list_for_each_xattr(entry, base_addr) {
		const struct xattr_handler *handler =
			f2fs_xattr_handler(entry->e_name_index);
		const char *prefix;
		size_t prefix_len;
		size_t size;

		/* bail out on an entry that would overrun the xattr area */
		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
			f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
						inode->i_ino);
			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
			error = -EFSCORRUPTED;
			goto cleanup;
		}

		if (!handler || (handler->list && !handler->list(dentry)))
			continue;

		prefix = xattr_prefix(handler);
		prefix_len = strlen(prefix);
		size = prefix_len + entry->e_name_len + 1;	/* +1: NUL */
		if (buffer) {
			if (size > rest) {
				error = -ERANGE;
				goto cleanup;
			}
			memcpy(buffer, prefix, prefix_len);
			buffer += prefix_len;
			memcpy(buffer, entry->e_name, entry->e_name_len);
			buffer += entry->e_name_len;
			*buffer++ = 0;
		}
		rest -= size;
	}
	error = buffer_size - rest;
cleanup:
	kfree(base_addr);
	return error;
}
613
Kinglong Mee5f35a2c2017-02-25 19:23:27 +0800614static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry,
615 const void *value, size_t size)
616{
617 void *pval = entry->e_name + entry->e_name_len;
Jaegeuk Kimb71dead2017-03-10 09:36:10 -0800618
619 return (le16_to_cpu(entry->e_value_size) == size) &&
620 !memcmp(pval, value, size);
Kinglong Mee5f35a2c2017-02-25 19:23:27 +0800621}
622
Jaegeuk Kime1123262014-04-23 12:17:25 +0900623static int __f2fs_setxattr(struct inode *inode, int index,
624 const char *name, const void *value, size_t size,
Jaegeuk Kimc02745e2014-04-23 12:23:14 +0900625 struct page *ipage, int flags)
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900626{
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900627 struct f2fs_xattr_entry *here, *last;
Randall Huang2777e652019-04-11 16:26:46 +0800628 void *base_addr, *last_base_addr;
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900629 int found, newsize;
Jaegeuk Kime1123262014-04-23 12:17:25 +0900630 size_t len;
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900631 __u32 new_hsize;
Liu Song2e0cd472021-01-31 20:26:05 +0800632 int error;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900633
634 if (name == NULL)
635 return -EINVAL;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900636
637 if (value == NULL)
Jaegeuk Kime1123262014-04-23 12:17:25 +0900638 size = 0;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900639
Jaegeuk Kime1123262014-04-23 12:17:25 +0900640 len = strlen(name);
Namjae Jeon7c909772013-03-17 17:26:39 +0900641
Chao Yu037fe702015-07-13 17:45:19 +0800642 if (len > F2FS_NAME_LEN)
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900643 return -ERANGE;
644
Chao Yu037fe702015-07-13 17:45:19 +0800645 if (size > MAX_VALUE_LEN(inode))
646 return -E2BIG;
647
Chao Yu86696962016-09-18 23:30:04 +0800648 error = read_all_xattrs(inode, ipage, &base_addr);
649 if (error)
650 return error;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900651
Chao Yuba3b5832020-02-14 17:44:11 +0800652 last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
Randall Huang2777e652019-04-11 16:26:46 +0800653
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900654 /* find entry with wanted name. */
Chao Yudd9d4a32021-12-12 17:16:56 +0800655 here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
Randall Huang2777e652019-04-11 16:26:46 +0800656 if (!here) {
Chao Yuc83414a2019-06-20 11:36:15 +0800657 f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
658 inode->i_ino);
659 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
Chao Yu10f966b2019-06-20 11:36:14 +0800660 error = -EFSCORRUPTED;
Randall Huang2777e652019-04-11 16:26:46 +0800661 goto exit;
662 }
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900663
Jaegeuk Kimdd9cfe22013-08-13 10:13:55 +0900664 found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900665
Kinglong Mee5f35a2c2017-02-25 19:23:27 +0800666 if (found) {
667 if ((flags & XATTR_CREATE)) {
668 error = -EEXIST;
669 goto exit;
670 }
671
Daeho Jeongb2c46922018-01-20 15:46:33 +0800672 if (value && f2fs_xattr_value_same(here, value, size))
Chao Yu17232e832020-12-25 16:52:27 +0800673 goto same;
Kinglong Mee5f35a2c2017-02-25 19:23:27 +0800674 } else if ((flags & XATTR_REPLACE)) {
Jaegeuk Kim916decb2014-04-23 12:28:18 +0900675 error = -ENODATA;
676 goto exit;
Jaegeuk Kim916decb2014-04-23 12:28:18 +0900677 }
678
679 last = here;
Chao Yu645a3c42021-12-12 17:16:30 +0800680 while (!IS_XATTR_LAST_ENTRY(last)) {
681 if ((void *)(last) + sizeof(__u32) > last_base_addr ||
682 (void *)XATTR_NEXT_ENTRY(last) > last_base_addr) {
683 f2fs_err(F2FS_I_SB(inode), "inode (%lu) has invalid last xattr entry, entry_size: %zu",
684 inode->i_ino, ENTRY_SIZE(last));
685 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
686 error = -EFSCORRUPTED;
687 goto exit;
688 }
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900689 last = XATTR_NEXT_ENTRY(last);
Chao Yu645a3c42021-12-12 17:16:30 +0800690 }
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900691
Jaegeuk Kime1123262014-04-23 12:17:25 +0900692 newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900693
694 /* 1. Check space */
695 if (value) {
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900696 int free;
697 /*
698 * If value is NULL, it is remove operation.
arter97e1c42042014-08-06 23:22:50 +0900699 * In case of update operation, we calculate free.
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900700 */
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900701 free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900702 if (found)
Chao Yucc3de6a2013-10-29 14:17:05 +0800703 free = free + ENTRY_SIZE(here);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900704
Jaegeuk Kim6bacf522013-12-06 15:00:58 +0900705 if (unlikely(free < newsize)) {
Jaegeuk Kim58457f12016-04-12 11:52:30 -0700706 error = -E2BIG;
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900707 goto exit;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900708 }
709 }
710
711 /* 2. Remove old entry */
712 if (found) {
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900713 /*
714 * If entry is found, remove old entry.
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900715 * If not found, remove operation is not needed.
716 */
717 struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
718 int oldsize = ENTRY_SIZE(here);
719
720 memmove(here, next, (char *)last - (char *)next);
721 last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
722 memset(last, 0, oldsize);
723 }
724
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900725 new_hsize = (char *)last - (char *)base_addr;
726
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900727 /* 3. Write new entry */
728 if (value) {
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900729 char *pval;
730 /*
731 * Before we come here, old entry is removed.
732 * We just write new entry.
733 */
Jaegeuk Kime1123262014-04-23 12:17:25 +0900734 last->e_name_index = index;
735 last->e_name_len = len;
736 memcpy(last->e_name, name, len);
737 pval = last->e_name + len;
738 memcpy(pval, value, size);
739 last->e_value_size = cpu_to_le16(size);
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900740 new_hsize += newsize;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900741 }
742
Jaegeuk Kim65985d92013-08-14 21:57:27 +0900743 error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
744 if (error)
745 goto exit;
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900746
Jaegeuk Kimf424f662015-04-20 15:19:06 -0700747 if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
748 !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
749 f2fs_set_encrypted_inode(inode);
Jaegeuk Kim7c457292016-10-14 11:51:23 -0700750 f2fs_mark_inode_dirty_sync(inode, true);
Jaegeuk Kimbbf156f2016-08-29 18:23:45 -0700751 if (!error && S_ISDIR(inode->i_mode))
752 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
Chao Yu17232e832020-12-25 16:52:27 +0800753
754same:
755 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
756 inode->i_mode = F2FS_I(inode)->i_acl_mode;
757 inode->i_ctime = current_time(inode);
758 clear_inode_flag(inode, FI_ACL_MODE);
759 }
760
Namjae Jeon7c909772013-03-17 17:26:39 +0900761exit:
Chao Yuc8eb7022020-09-14 16:47:00 +0800762 kfree(base_addr);
Jaegeuk Kimaf48b852012-11-02 17:12:17 +0900763 return error;
764}
Russ Knize52ab9562013-09-24 15:49:23 -0500765
Jaegeuk Kime1123262014-04-23 12:17:25 +0900766int f2fs_setxattr(struct inode *inode, int index, const char *name,
767 const void *value, size_t size,
Jaegeuk Kimc02745e2014-04-23 12:23:14 +0900768 struct page *ipage, int flags)
Russ Knize52ab9562013-09-24 15:49:23 -0500769{
Jaegeuk Kim40813632014-09-02 15:31:18 -0700770 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Russ Knize52ab9562013-09-24 15:49:23 -0500771 int err;
772
Chao Yua25c2cd2019-07-22 17:57:06 +0800773 if (unlikely(f2fs_cp_error(sbi)))
774 return -EIO;
Chao Yu00e09c02019-08-23 17:58:36 +0800775 if (!f2fs_is_checkpoint_ready(sbi))
776 return -ENOSPC;
Chao Yu955ebcd2019-07-22 17:57:05 +0800777
Chao Yu10a26872021-10-28 21:03:05 +0800778 err = f2fs_dquot_initialize(inode);
Jaegeuk Kimd8d13892017-10-23 23:50:15 +0200779 if (err)
780 return err;
781
Chao Yu4d57b862018-05-30 00:20:41 +0800782 /* this case is only from f2fs_init_inode_metadata */
Jaegeuk Kimd631abd2014-06-01 23:24:30 +0900783 if (ipage)
784 return __f2fs_setxattr(inode, index, name, value,
785 size, ipage, flags);
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -0800786 f2fs_balance_fs(sbi, true);
Russ Knize52ab9562013-09-24 15:49:23 -0500787
Gu Zhenge4795562013-09-27 18:08:30 +0800788 f2fs_lock_op(sbi);
Yunlei He27161f12017-09-07 10:40:54 +0800789 down_write(&F2FS_I(inode)->i_xattr_sem);
Jaegeuk Kimc02745e2014-04-23 12:23:14 +0900790 err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
Yunlei He27161f12017-09-07 10:40:54 +0800791 up_write(&F2FS_I(inode)->i_xattr_sem);
Gu Zhenge4795562013-09-27 18:08:30 +0800792 f2fs_unlock_op(sbi);
Russ Knize52ab9562013-09-24 15:49:23 -0500793
Jaegeuk Kimd0239e12016-01-08 16:57:48 -0800794 f2fs_update_time(sbi, REQ_TIME);
Russ Knize52ab9562013-09-24 15:49:23 -0500795 return err;
796}
Chao Yua9991502020-02-25 18:17:10 +0800797
798int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
799{
800 dev_t dev = sbi->sb->s_bdev->bd_dev;
801 char slab_name[32];
802
803 sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));
804
805 sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
806 sizeof(__le32) + XATTR_PADDING_SIZE;
807
808 sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
809 sbi->inline_xattr_slab_size);
810 if (!sbi->inline_xattr_slab)
811 return -ENOMEM;
812
813 return 0;
814}
815
/*
 * f2fs_destroy_xattr_caches - tear down the inline xattr slab cache.
 * @sbi: f2fs superblock info
 *
 * Counterpart of f2fs_init_xattr_caches(); called at unmount.
 * kmem_cache_destroy() tolerates a NULL cache pointer, so this is
 * safe even if cache creation failed earlier.
 */
void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->inline_xattr_slab);
}