blob: 5c52ee869272d4143acf485612f53d01799a8eb9 [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Christoph Hellwigef14f0c2009-06-10 17:07:47 +02002/*
3 * Copyright (c) 2008, Christoph Hellwig
4 * All Rights Reserved.
Christoph Hellwigef14f0c2009-06-10 17:07:47 +02005 */
6#include "xfs.h"
Darrick J. Wong5467b342019-06-28 19:25:35 -07007#include "xfs_shared.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +11008#include "xfs_format.h"
Dave Chinner69432832013-08-12 20:49:23 +10009#include "xfs_log_format.h"
Dave Chinner7fd36c42013-08-12 20:49:32 +100010#include "xfs_trans_resv.h"
Dave Chinner0a8aa192013-06-05 12:09:10 +100011#include "xfs_mount.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110012#include "xfs_inode.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110013#include "xfs_attr.h"
Christoph Hellwig0b1b2132009-12-14 23:14:59 +000014#include "xfs_trace.h"
Darrick J. Wonga5155b82019-11-02 09:40:53 -070015#include "xfs_error.h"
Darrick J. Wong5f213dd2019-11-06 17:19:33 -080016#include "xfs_acl.h"
Christoph Hellwiga2544622020-02-26 17:30:33 -080017#include "xfs_da_format.h"
18#include "xfs_da_btree.h"
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -080019#include "xfs_trans.h"
Christoph Hellwigef14f0c2009-06-10 17:07:47 +020020
Darrick J. Wong5f213dd2019-11-06 17:19:33 -080021#include <linux/posix_acl_xattr.h>
Christoph Hellwigef14f0c2009-06-10 17:07:47 +020022
Christoph Hellwigef14f0c2009-06-10 17:07:47 +020023/*
24 * Locking scheme:
25 * - all ACL updates are protected by inode->i_mutex, which is taken before
26 * calling into this file.
Christoph Hellwigef14f0c2009-06-10 17:07:47 +020027 */
28
/*
 * Convert an on-disk XFS ACL (big-endian xfs_acl attribute value) into an
 * in-core VFS posix_acl.
 *
 * @mp:		mount, used only for corruption reporting
 * @aclp:	raw attribute value read from disk
 * @len:	length in bytes of @aclp
 * @max_entries: per-mount cap on ACL entry count
 *
 * Returns a referenced posix_acl on success, ERR_PTR(-EFSCORRUPTED) if the
 * buffer fails validation, ERR_PTR(-ENOMEM) on allocation failure, or
 * ERR_PTR(-EINVAL) for an unrecognized tag.  The caller owns the returned
 * reference and must posix_acl_release() it.
 */
STATIC struct posix_acl *
xfs_acl_from_disk(
	struct xfs_mount	*mp,
	const struct xfs_acl	*aclp,
	int			len,
	int			max_entries)
{
	struct posix_acl_entry	*acl_e;
	struct posix_acl	*acl;
	const struct xfs_acl_entry *ace;
	unsigned int		count, i;

	/* Must at least hold the fixed header before we read acl_cnt. */
	if (len < sizeof(*aclp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	/*
	 * The entry count must respect the mount's limit, and the declared
	 * count must exactly match the attribute length we were handed.
	 */
	count = be32_to_cpu(aclp->acl_cnt);
	if (count > max_entries || XFS_ACL_SIZE(count) != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	acl = posix_acl_alloc(count, GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		acl_e = &acl->a_entries[i];
		ace = &aclp->acl_entry[i];

		/*
		 * The tag is 32 bits on disk and 16 bits in core.
		 *
		 * Because every access to it goes through the core
		 * format first this is not a problem.
		 */
		acl_e->e_tag = be32_to_cpu(ace->ae_tag);
		acl_e->e_perm = be16_to_cpu(ace->ae_perm);

		switch (acl_e->e_tag) {
		case ACL_USER:
			/* ae_id is only meaningful for USER/GROUP tags. */
			acl_e->e_uid = make_kuid(&init_user_ns,
						 be32_to_cpu(ace->ae_id));
			break;
		case ACL_GROUP:
			acl_e->e_gid = make_kgid(&init_user_ns,
						 be32_to_cpu(ace->ae_id));
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			break;
		default:
			/* Unknown tag: drop our reference and fail. */
			goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
95
96STATIC void
97xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
98{
99 const struct posix_acl_entry *acl_e;
100 struct xfs_acl_entry *ace;
101 int i;
102
103 aclp->acl_cnt = cpu_to_be32(acl->a_count);
104 for (i = 0; i < acl->a_count; i++) {
105 ace = &aclp->acl_entry[i];
106 acl_e = &acl->a_entries[i];
107
108 ace->ae_tag = cpu_to_be32(acl_e->e_tag);
Dwight Engen288bbe02013-08-15 14:07:59 -0400109 switch (acl_e->e_tag) {
110 case ACL_USER:
Christoph Hellwigba8adad2020-02-21 08:31:27 -0800111 ace->ae_id = cpu_to_be32(
112 from_kuid(&init_user_ns, acl_e->e_uid));
Dwight Engen288bbe02013-08-15 14:07:59 -0400113 break;
114 case ACL_GROUP:
Christoph Hellwigba8adad2020-02-21 08:31:27 -0800115 ace->ae_id = cpu_to_be32(
116 from_kgid(&init_user_ns, acl_e->e_gid));
Dwight Engen288bbe02013-08-15 14:07:59 -0400117 break;
118 default:
119 ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
120 break;
121 }
122
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200123 ace->ae_perm = cpu_to_be16(acl_e->e_perm);
124 }
125}
126
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200127struct posix_acl *
Miklos Szeredi0cad6242021-08-18 22:08:24 +0200128xfs_get_acl(struct inode *inode, int type, bool rcu)
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200129{
Christoph Hellwige5171d72020-02-26 17:30:34 -0800130 struct xfs_inode *ip = XFS_I(inode);
131 struct xfs_mount *mp = ip->i_mount;
132 struct posix_acl *acl = NULL;
133 struct xfs_da_args args = {
134 .dp = ip,
Christoph Hellwigd5f0f492020-02-26 17:30:42 -0800135 .attr_filter = XFS_ATTR_ROOT,
Christoph Hellwige5171d72020-02-26 17:30:34 -0800136 .valuelen = XFS_ACL_MAX_SIZE(mp),
137 };
138 int error;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200139
Miklos Szeredi0cad6242021-08-18 22:08:24 +0200140 if (rcu)
141 return ERR_PTR(-ECHILD);
142
Christoph Hellwig4e34e712011-07-23 17:37:31 +0200143 trace_xfs_get_acl(ip);
144
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200145 switch (type) {
146 case ACL_TYPE_ACCESS:
Christoph Hellwige5171d72020-02-26 17:30:34 -0800147 args.name = SGI_ACL_FILE;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200148 break;
149 case ACL_TYPE_DEFAULT:
Christoph Hellwige5171d72020-02-26 17:30:34 -0800150 args.name = SGI_ACL_DEFAULT;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200151 break;
152 default:
Al Viro1cbd20d2009-06-09 13:29:39 -0400153 BUG();
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200154 }
Christoph Hellwige5171d72020-02-26 17:30:34 -0800155 args.namelen = strlen(args.name);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200156
Christoph Hellwigd49db182020-02-26 17:30:35 -0800157 /*
158 * If the attribute doesn't exist make sure we have a negative cache
159 * entry, for any other error assume it is transient.
160 */
Christoph Hellwige5171d72020-02-26 17:30:34 -0800161 error = xfs_attr_get(&args);
Christoph Hellwigd49db182020-02-26 17:30:35 -0800162 if (!error) {
Christoph Hellwige5171d72020-02-26 17:30:34 -0800163 acl = xfs_acl_from_disk(mp, args.value, args.valuelen,
164 XFS_ACL_MAX_ENTRIES(mp));
Christoph Hellwigd49db182020-02-26 17:30:35 -0800165 } else if (error != -ENOATTR) {
166 acl = ERR_PTR(error);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200167 }
Christoph Hellwigd49db182020-02-26 17:30:35 -0800168
169 kmem_free(args.value);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200170 return acl;
171}
172
Jan Kara8ba35872017-06-26 08:48:18 -0700173int
174__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200175{
Christoph Hellwiga2544622020-02-26 17:30:33 -0800176 struct xfs_inode *ip = XFS_I(inode);
177 struct xfs_da_args args = {
178 .dp = ip,
Christoph Hellwigd5f0f492020-02-26 17:30:42 -0800179 .attr_filter = XFS_ATTR_ROOT,
Christoph Hellwiga2544622020-02-26 17:30:33 -0800180 };
181 int error;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200182
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200183 switch (type) {
184 case ACL_TYPE_ACCESS:
Christoph Hellwiga2544622020-02-26 17:30:33 -0800185 args.name = SGI_ACL_FILE;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200186 break;
187 case ACL_TYPE_DEFAULT:
188 if (!S_ISDIR(inode->i_mode))
189 return acl ? -EACCES : 0;
Christoph Hellwiga2544622020-02-26 17:30:33 -0800190 args.name = SGI_ACL_DEFAULT;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200191 break;
192 default:
193 return -EINVAL;
194 }
Christoph Hellwiga2544622020-02-26 17:30:33 -0800195 args.namelen = strlen(args.name);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200196
197 if (acl) {
Christoph Hellwiged02d132020-02-26 17:30:44 -0800198 args.valuelen = XFS_ACL_SIZE(acl->a_count);
Carlos Maiolino8ca79df2020-09-01 11:47:12 -0700199 args.value = kvzalloc(args.valuelen, GFP_KERNEL);
Christoph Hellwiga2544622020-02-26 17:30:33 -0800200 if (!args.value)
Dave Chinnerfdd3cce2013-09-02 20:53:00 +1000201 return -ENOMEM;
Christoph Hellwiga2544622020-02-26 17:30:33 -0800202 xfs_acl_to_disk(args.value, acl);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200203 }
204
Christoph Hellwiga2544622020-02-26 17:30:33 -0800205 error = xfs_attr_set(&args);
206 kmem_free(args.value);
Christoph Hellwig0eb81a52020-02-26 17:30:29 -0800207
208 /*
209 * If the attribute didn't exist to start with that's fine.
210 */
211 if (!acl && error == -ENOATTR)
212 error = 0;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200213 if (!error)
Al Viro1cbd20d2009-06-09 13:29:39 -0400214 set_cached_acl(inode, type, acl);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200215 return error;
216}
217
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200218static int
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -0800219xfs_acl_set_mode(
220 struct inode *inode,
221 umode_t mode)
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200222{
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -0800223 struct xfs_inode *ip = XFS_I(inode);
224 struct xfs_mount *mp = ip->i_mount;
225 struct xfs_trans *tp;
226 int error;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200227
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -0800228 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
229 if (error)
230 return error;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200231
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -0800232 xfs_ilock(ip, XFS_ILOCK_EXCL);
233 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
234 inode->i_mode = mode;
235 inode->i_ctime = current_time(inode);
236 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200237
Dave Chinner0560f312021-08-18 18:46:52 -0700238 if (xfs_has_wsync(mp))
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -0800239 xfs_trans_set_sync(tp);
240 return xfs_trans_commit(tp);
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200241}
242
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200243int
Christian Brauner549c7292021-01-21 14:19:43 +0100244xfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
245 struct posix_acl *acl, int type)
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200246{
Dave Chinner67f2ffe2017-10-09 11:37:23 -0700247 umode_t mode;
248 bool set_mode = false;
Christoph Hellwig431547b2009-11-13 09:52:56 +0000249 int error = 0;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200250
Christoph Hellwig2401dc22013-12-20 05:16:50 -0800251 if (!acl)
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200252 goto set_acl;
253
Jie Liu4ae69fe2014-02-07 15:26:11 +1100254 error = -E2BIG;
Dave Chinner0a8aa192013-06-05 12:09:10 +1000255 if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
Christoph Hellwig2401dc22013-12-20 05:16:50 -0800256 return error;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200257
258 if (type == ACL_TYPE_ACCESS) {
Christoph Hellwigf736d932021-01-21 14:19:58 +0100259 error = posix_acl_update_mode(mnt_userns, inode, &mode, &acl);
Jan Kara07393102016-09-19 17:39:09 +0200260 if (error)
261 return error;
Dave Chinner67f2ffe2017-10-09 11:37:23 -0700262 set_mode = true;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200263 }
264
265 set_acl:
Dave Chinner67f2ffe2017-10-09 11:37:23 -0700266 /*
267 * We set the mode after successfully updating the ACL xattr because the
268 * xattr update can fail at ENOSPC and we don't want to change the mode
269 * if the ACL update hasn't been applied.
270 */
Christoph Hellwig5d24ec4c2020-12-10 20:00:39 -0800271 error = __xfs_set_acl(inode, acl, type);
272 if (!error && set_mode && mode != inode->i_mode)
273 error = xfs_acl_set_mode(inode, mode);
Dave Chinner67f2ffe2017-10-09 11:37:23 -0700274 return error;
Christoph Hellwigef14f0c2009-06-10 17:07:47 +0200275}
Christoph Hellwig5a3930e2020-02-26 17:30:41 -0800276
277/*
278 * Invalidate any cached ACLs if the user has bypassed the ACL interface.
279 * We don't validate the content whatsoever so it is caller responsibility to
280 * provide data in valid format and ensure i_mode is consistent.
281 */
282void
283xfs_forget_acl(
284 struct inode *inode,
285 const char *name)
286{
287 if (!strcmp(name, SGI_ACL_FILE))
288 forget_cached_acl(inode, ACL_TYPE_ACCESS);
289 else if (!strcmp(name, SGI_ACL_DEFAULT))
290 forget_cached_acl(inode, ACL_TYPE_DEFAULT);
291}