// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

#include <linux/iversion.h>

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

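/*
 * Inode version numbers accepted on this filesystem: v5 (CRC-enabled)
 * superblocks require v3 inodes, while older superblocks use v1 or v2.
 */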
bool
xfs_dinode_good_version(
	struct xfs_mount *mp,
	__u8		version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			(unlinked_ino == NULLAGINO ||
			 xfs_verify_agino(mp, agno, unlinked_ino));
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

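/*
 * Verifier tables attached to inode cluster buffers. The readahead variant
 * differs only in its read verifier, which fails quietly (see the comment
 * above xfs_inode_buf_verify()) rather than reporting corruption.
 */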
const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}
		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

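/*
 * Copy the on-disk inode core into the in-core inode and its associated
 * VFS inode. v1 inodes are converted to the v2 format on the way in.
 */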
void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}

	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);

	if (to->di_version == 3) {
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}

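/*
 * Copy the in-core inode back into on-disk (big-endian) format. The inode
 * number, LSN stamp and filesystem uuid are only written for v3 inodes.
 */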
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

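/*
 * Convert a logged inode core (struct xfs_log_dinode) into on-disk dinode
 * format, converting every field to big-endian; used e.g. when inode log
 * items are replayed during log recovery.
 */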
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

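/*
 * Sanity check a single inode fork: local-format data forks must not belong
 * to regular files or overflow the fork's literal area, and extent/btree
 * forks must not claim more extents than their format allows.
 */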
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	uint32_t		di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);

	switch (XFS_DFORK_FORMAT(dip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * no local regular files yet
		 */
		if (whichfork == XFS_DATA_FORK) {
			if (S_ISREG(be16_to_cpu(dip->di_mode)))
				return __this_address;
			if (be64_to_cpu(dip->di_size) >
					XFS_DFORK_SIZE(dip, mp, whichfork))
				return __this_address;
		}
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_ATTR_FORK) {
			if (di_nextents > MAXAEXTNUM)
				return __this_address;
		} else if (di_nextents > MAXEXTNUM) {
			return __this_address;
		}
		break;
	default:
		return __this_address;
	}
	return NULL;
}

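/*
 * Verify an on-disk inode: magic number, v3 integrity fields (CRC, inode
 * number, uuid), size/mode/flag sanity, fork formats and the extent size
 * hints. Returns NULL if the inode looks valid, otherwise the address of
 * the failing check.
 */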
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (XFS_DFORK_Q(dip)) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	     !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	return NULL;
}

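/*
 * Compute and stamp the inode CRC. Only v3 (CRC-enabled) inodes carry a
 * CRC, so this is a no-op for older inode versions.
 */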
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the
 * new inode core with a random generation number. If we are keeping inodes
 * around, we need to read the inode cluster to get the existing generation
 * number off disk. Further, if we are using version 4 superblocks (i.e. v1/v2
 * inode format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_failaddr_t	fa;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		ip->i_d.di_version = 3;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
				sizeof(*dip), fa);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}

	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while. This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Validate di_extsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_extsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);

	if (rt_flag)
		blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;

	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && !S_ISREG(mode))
		return __this_address;

	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;

	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but extsize remains */
	if (mode && !(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;

	if (extsize_bytes % blocksize_bytes)
		return __this_address;

	if (extsize > MAXEXTLEN)
		return __this_address;

	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}

/*
 * Validate di_cowextsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount		*mp,
	uint32_t			cowextsize,
	uint16_t			mode,
	uint16_t			flags,
	uint64_t			flags2)
{
	bool				rt_flag;
	bool				hint_flag;
	uint32_t			cowextsize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

	if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && cowextsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but cowextsize remains */
	if (mode && !hint_flag && cowextsize != 0)
		return __this_address;

	if (hint_flag && rt_flag)
		return __this_address;

	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;

	if (cowextsize > MAXEXTLEN)
		return __this_address;

	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}