blob: 5c9a7440d9e4ed5eae62debe0242e152f7c182f3 [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Dave Chinner1fd71152013-08-12 20:49:35 +10002/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
Dave Chinner1fd71152013-08-12 20:49:35 +10005 */
6#include "xfs.h"
7#include "xfs_fs.h"
Dave Chinner70a98832013-10-23 10:36:05 +11008#include "xfs_shared.h"
Dave Chinner239880e2013-10-23 10:50:10 +11009#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100012#include "xfs_mount.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100013#include "xfs_inode.h"
Darrick J. Wonge9e899a2017-10-31 12:04:49 -070014#include "xfs_errortag.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100015#include "xfs_error.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100016#include "xfs_icache.h"
Dave Chinner239880e2013-10-23 10:50:10 +110017#include "xfs_trans.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110018#include "xfs_ialloc.h"
Amir Goldsteina324cbf2017-01-17 11:41:44 -080019#include "xfs_dir2.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100020
Jeff Laytonf0e28282017-12-11 06:35:19 -050021#include <linux/iversion.h>
22
Dave Chinner1fd71152013-08-12 20:49:35 +100023/*
Dave Chinnerd8914002013-08-27 11:39:37 +100024 * If we are doing readahead on an inode buffer, we might be in log recovery
25 * reading an inode allocation buffer that hasn't yet been replayed, and hence
26 * has not had the inode cores stamped into it. Hence for readahead, the buffer
27 * may be potentially invalid.
28 *
Dave Chinnerb79f4a12016-01-12 07:03:44 +110029 * If the readahead buffer is invalid, we need to mark it with an error and
30 * clear the DONE status of the buffer so that a followup read will re-read it
31 * from disk. We don't report the error otherwise to avoid warnings during log
Keyur Patel06734e32020-06-29 14:44:35 -070032 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
Dave Chinnerb79f4a12016-01-12 07:03:44 +110033 * because all we want to do is say readahead failed; there is no-one to report
34 * the error to, so this will distinguish it from a non-ra verifier failure.
Keyur Patel06734e32020-06-29 14:44:35 -070035 * Changes to this readahead error behaviour also need to be reflected in
Dave Chinner7d6a13f2016-01-12 07:04:01 +110036 * xfs_dquot_buf_readahead_verify().
Dave Chinnerd8914002013-08-27 11:39:37 +100037 */
/*
 * Verify every inode core in an inode cluster buffer.
 *
 * For readahead completion we must not report errors loudly: mark the buffer
 * failed and not-DONE so a real read will retry from disk (see the comment
 * block above for the full rationale).
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		/* magic, version and unlinked pointer must all look sane */
		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
			xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
			xfs_verify_agino_or_null(mp, agno, unlinked_ino);
		/* XFS_TEST_ERROR also allows injected failures via errortag */
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				/*
				 * Quiet failure: clear DONE so a followup
				 * read re-reads from disk; EIO distinguishes
				 * this from a non-readahead verifier failure.
				 */
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
	"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			/* report the first bad inode core and stop scanning */
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}
84
85
/* b_ops read verifier: a normal (non-readahead) read, so report errors. */
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}
92
/* b_ops read verifier for readahead: fail quietly so a real read retries. */
static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}
99
/* b_ops write verifier: re-check inode cores before they go to disk. */
static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}
106
/* Buffer ops for normal inode cluster buffer reads/writes. */
const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	/* both magic16 slots hold the same value; only one magic exists */
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
114
/* Buffer ops for inode cluster readahead: same checks, quiet read failure. */
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
122
Dave Chinner1fd71152013-08-12 20:49:35 +1000123
/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter.
 *
 * Returns 0 or a negative errno from xfs_trans_read_buf(). The buffer is
 * read XBF_UNMAPPED and validated by xfs_inode_buf_ops.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_buf		**bpp)
{
	return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   imap->im_len, XBF_UNMAPPED, bpp,
				   &xfs_inode_buf_ops);
}
140
Darrick J. Wongf93e54362020-08-17 09:59:07 -0700141static inline struct timespec64 xfs_inode_decode_bigtime(uint64_t ts)
142{
143 struct timespec64 tv;
144 uint32_t n;
145
146 tv.tv_sec = xfs_bigtime_to_unix(div_u64_rem(ts, NSEC_PER_SEC, &n));
147 tv.tv_nsec = n;
148
149 return tv;
150}
151
/* Convert an ondisk timestamp to an incore timestamp. */
struct timespec64
xfs_inode_from_disk_ts(
	struct xfs_dinode	*dip,
	const xfs_timestamp_t	ts)
{
	struct timespec64		tv;
	struct xfs_legacy_timestamp	*lts;

	/* bigtime inodes store a single 64-bit nanosecond counter */
	if (xfs_dinode_has_bigtime(dip))
		return xfs_inode_decode_bigtime(be64_to_cpu(ts));

	/*
	 * Legacy format: two big-endian 32-bit words.  The (int) casts
	 * sign-extend so pre-epoch times stay negative on 64-bit tv_sec.
	 */
	lts = (struct xfs_legacy_timestamp *)&ts;
	tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
	tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);

	return tv;
}
170
/*
 * Load an incore inode (and its VFS inode) from the ondisk dinode @from.
 *
 * The dinode is verified first; corruption returns -EFSCORRUPTED before any
 * incore state is touched.  On success the data fork (and attr fork, if
 * present) are initialized; on attr fork failure the data fork is torn down
 * again so the caller sees no partial state.
 */
int
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct inode		*inode = VFS_I(ip);
	int			error;
	xfs_failaddr_t		fa;

	ASSERT(ip->i_cowfp == NULL);
	ASSERT(ip->i_afp == NULL);

	fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
				sizeof(*from), fa);
		return -EFSCORRUPTED;
	}

	/*
	 * First get the permanent information that is needed to allocate an
	 * inode. If the inode is unused, mode is zero and we shouldn't mess
	 * with the uninitialized part of it.
	 */
	/* v3 inodes don't use di_flushiter; only copy it for older formats */
	if (!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb))
		ip->i_flushiter = be16_to_cpu(from->di_flushiter);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	if (!inode->i_mode)
		return 0;

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 * They will also be unconditionally written back to disk as v2 inodes.
	 */
	if (unlikely(from->di_version == 1)) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		ip->i_projid = 0;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		/* project id is split across two 16-bit ondisk fields */
		ip->i_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
					be16_to_cpu(from->di_projid_lo);
	}

	i_uid_write(inode, be32_to_cpu(from->di_uid));
	i_gid_write(inode, be32_to_cpu(from->di_gid));

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
	inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
	inode->i_ctime = xfs_inode_from_disk_ts(from, from->di_ctime);

	ip->i_disk_size = be64_to_cpu(from->di_size);
	ip->i_nblocks = be64_to_cpu(from->di_nblocks);
	ip->i_extsize = be32_to_cpu(from->di_extsize);
	ip->i_forkoff = from->di_forkoff;
	ip->i_diflags = be16_to_cpu(from->di_flags);

	/* remember nonzero DMAPI fields so writeback preserves them */
	if (from->di_dmevmask || from->di_dmstate)
		xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);

	/* v3-only fields */
	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
		ip->i_diflags2 = be64_to_cpu(from->di_flags2);
		ip->i_cowextsize = be32_to_cpu(from->di_cowextsize);
	}

	error = xfs_iformat_data_fork(ip, from);
	if (error)
		return error;
	if (from->di_forkoff) {
		error = xfs_iformat_attr_fork(ip, from);
		if (error)
			goto out_destroy_data_fork;
	}
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);
	return 0;

out_destroy_data_fork:
	xfs_idestroy_fork(&ip->i_df);
	return error;
}
262
/* Convert an incore timestamp to an ondisk timestamp. */
static inline xfs_timestamp_t
xfs_inode_to_disk_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_timestamp	*lts;
	xfs_timestamp_t			ts;

	/* bigtime inodes encode the whole value as one 64-bit counter */
	if (xfs_inode_has_bigtime(ip))
		return cpu_to_be64(xfs_inode_encode_bigtime(tv));

	/* legacy format: pack seconds/nanoseconds into the 64-bit slot */
	lts = (struct xfs_legacy_timestamp *)&ts;
	lts->t_sec = cpu_to_be32(tv.tv_sec);
	lts->t_nsec = cpu_to_be32(tv.tv_nsec);

	return ts;
}
281
/*
 * Serialize an incore inode into the ondisk dinode @to.
 *
 * @lsn is stamped into di_lsn for v3 inodes only.  v1 is never written:
 * incore inodes were converted to at least v2 at read time, so the output
 * version is 2 or 3 depending on the superblock feature bits.
 */
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	/* v1-only field, always zeroed on writeback */
	to->di_onlink = 0;

	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = cpu_to_be32(i_uid_read(inode));
	to->di_gid = cpu_to_be32(i_gid_read(inode));
	/* project id splits into two 16-bit ondisk fields */
	to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
	to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_disk_ts(ip, inode->i_ctime);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(ip->i_disk_size);
	to->di_nblocks = cpu_to_be64(ip->i_nblocks);
	to->di_extsize = cpu_to_be32(ip->i_extsize);
	to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
	to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
	to->di_forkoff = ip->i_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_flags = cpu_to_be16(ip->i_diflags);

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		to->di_version = 3;
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime);
		to->di_flags2 = cpu_to_be64(ip->i_diflags2);
		to->di_cowextsize = cpu_to_be32(ip->i_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		/* v3 inodes are CRC protected; flushiter is unused */
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = cpu_to_be16(ip->i_flushiter);
	}
}
332
/*
 * Sanity-check one fork (data or attr) of an ondisk dinode: the fork format
 * must be consistent with the mode, size and extent counts.  Returns the
 * failure address of the first bad check, or NULL if the fork looks fine.
 */
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	uint32_t		di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);

	switch (XFS_DFORK_FORMAT(dip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * no local regular files yet
		 */
		if (whichfork == XFS_DATA_FORK) {
			if (S_ISREG(be16_to_cpu(dip->di_mode)))
				return __this_address;
			/* inline data cannot exceed the fork's literal area */
			if (be64_to_cpu(dip->di_size) >
					XFS_DFORK_SIZE(dip, mp, whichfork))
				return __this_address;
		}
		/* local-format forks have no extents at all */
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		/* per-fork hard extent count limits */
		if (whichfork == XFS_ATTR_FORK) {
			if (di_nextents > MAXAEXTNUM)
				return __this_address;
		} else if (di_nextents > MAXEXTNUM) {
			return __this_address;
		}
		break;
	default:
		return __this_address;
	}
	return NULL;
}
373
/*
 * Validate di_forkoff (attr fork offset, in 8-byte units) against the data
 * fork format.  A zero forkoff (no attr fork) is always acceptable.
 */
static xfs_failaddr_t
xfs_dinode_verify_forkoff(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp)
{
	if (!dip->di_forkoff)
		return NULL;

	switch (dip->di_format)  {
	case XFS_DINODE_FMT_DEV:
		/* dev inodes: attr fork sits right after the dev_t */
		if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
			return __this_address;
		break;
	case XFS_DINODE_FMT_LOCAL:	/* fall through ... */
	case XFS_DINODE_FMT_EXTENTS:    /* fall through ... */
	case XFS_DINODE_FMT_BTREE:
		/* offset must land inside the inode literal area */
		if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}
398
/*
 * Verify an ondisk dinode @dip that is supposed to live at inode number
 * @ino.  Returns NULL if everything checks out, otherwise the address of
 * the first failed check (for xfs_inode_verifier_error reporting).
 *
 * Check ordering matters: CRC/identity first, then mode-independent
 * fields, then fork layout, then v3-only flag validation.
 */
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_has_v3inode(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		/* v3 inodes are self-identifying: check ino and fs uuid */
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	/* mode 0 means a free inode; only validate ftype when in use */
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	/* realtime files require a realtime device to be present */
	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* check for illegal values of forkoff */
	fa = xfs_dinode_verify_forkoff(dip, mp);
	if (fa)
		return fa;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (dip->di_forkoff) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	/* bigtime iflag can only happen on bigtime filesystems */
	if (xfs_dinode_has_bigtime(dip) &&
	    !xfs_sb_version_hasbigtime(&mp->m_sb))
		return __this_address;

	return NULL;
}
542
/*
 * Compute and stamp the CRC of an ondisk dinode.  v1/v2 inodes carry no
 * CRC, so this is a no-op for them; callers may invoke it unconditionally.
 */
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	/* v3 inodes only exist on CRC-enabled filesystems */
	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	/* checksum the whole inode, skipping the di_crc field itself */
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}
558
559/*
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700560 * Validate di_extsize hint.
561 *
562 * The rules are documented at xfs_ioctl_setattr_check_extsize().
563 * These functions must be kept in sync with each other.
564 */
565xfs_failaddr_t
566xfs_inode_validate_extsize(
567 struct xfs_mount *mp,
568 uint32_t extsize,
569 uint16_t mode,
570 uint16_t flags)
571{
572 bool rt_flag;
573 bool hint_flag;
574 bool inherit_flag;
575 uint32_t extsize_bytes;
576 uint32_t blocksize_bytes;
577
578 rt_flag = (flags & XFS_DIFLAG_REALTIME);
579 hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
580 inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
581 extsize_bytes = XFS_FSB_TO_B(mp, extsize);
582
583 if (rt_flag)
584 blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
585 else
586 blocksize_bytes = mp->m_sb.sb_blocksize;
587
588 if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
589 return __this_address;
590
591 if (hint_flag && !S_ISREG(mode))
592 return __this_address;
593
594 if (inherit_flag && !S_ISDIR(mode))
595 return __this_address;
596
597 if ((hint_flag || inherit_flag) && extsize == 0)
598 return __this_address;
599
Eric Sandeend4a34e12018-07-24 11:34:52 -0700600 /* free inodes get flags set to zero but extsize remains */
601 if (mode && !(hint_flag || inherit_flag) && extsize != 0)
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700602 return __this_address;
603
604 if (extsize_bytes % blocksize_bytes)
605 return __this_address;
606
607 if (extsize > MAXEXTLEN)
608 return __this_address;
609
610 if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
611 return __this_address;
612
613 return NULL;
614}
615
616/*
617 * Validate di_cowextsize hint.
618 *
619 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
620 * These functions must be kept in sync with each other.
621 */
622xfs_failaddr_t
623xfs_inode_validate_cowextsize(
624 struct xfs_mount *mp,
625 uint32_t cowextsize,
626 uint16_t mode,
627 uint16_t flags,
628 uint64_t flags2)
629{
630 bool rt_flag;
631 bool hint_flag;
632 uint32_t cowextsize_bytes;
633
634 rt_flag = (flags & XFS_DIFLAG_REALTIME);
635 hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
636 cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
637
638 if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
639 return __this_address;
640
641 if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
642 return __this_address;
643
644 if (hint_flag && cowextsize == 0)
645 return __this_address;
646
Eric Sandeend4a34e12018-07-24 11:34:52 -0700647 /* free inodes get flags set to zero but cowextsize remains */
648 if (mode && !hint_flag && cowextsize != 0)
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700649 return __this_address;
650
651 if (hint_flag && rt_flag)
652 return __this_address;
653
654 if (cowextsize_bytes % mp->m_sb.sb_blocksize)
655 return __this_address;
656
657 if (cowextsize > MAXEXTLEN)
658 return __this_address;
659
660 if (cowextsize > mp->m_sb.sb_agblocks / 2)
661 return __this_address;
662
663 return NULL;
664}