Dave Chinner | 0b61f8a | 2018-06-05 19:42:14 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
Nathan Scott | 7b71876 | 2005-11-02 14:58:39 +1100 | [diff] [blame] | 3 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. |
| 4 | * All Rights Reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | #include "xfs.h" |
Nathan Scott | a844f45 | 2005-11-02 14:38:42 +1100 | [diff] [blame] | 7 | #include "xfs_fs.h" |
Darrick J. Wong | 5467b34 | 2019-06-28 19:25:35 -0700 | [diff] [blame] | 8 | #include "xfs_shared.h" |
Dave Chinner | a4fbe6a | 2013-10-23 10:51:50 +1100 | [diff] [blame] | 9 | #include "xfs_format.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 10 | #include "xfs_log_format.h" |
| 11 | #include "xfs_trans_resv.h" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | #include "xfs_mount.h" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #include "xfs_inode.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 14 | #include "xfs_trans.h" |
Nathan Scott | a844f45 | 2005-11-02 14:38:42 +1100 | [diff] [blame] | 15 | #include "xfs_inode_item.h" |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 16 | #include "xfs_trace.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 17 | #include "xfs_trans_priv.h" |
Carlos Maiolino | d3a304b | 2017-08-08 18:21:50 -0700 | [diff] [blame] | 18 | #include "xfs_buf_item.h" |
Christoph Hellwig | 1234351 | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 19 | #include "xfs_log.h" |
Darrick J. Wong | a5155b8 | 2019-11-02 09:40:53 -0700 | [diff] [blame] | 20 | #include "xfs_error.h" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | |
Jeff Layton | f0e2828 | 2017-12-11 06:35:19 -0500 | [diff] [blame] | 22 | #include <linux/iversion.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 23 | |
/* Allocation zone backing struct xfs_inode_log_item allocations. */
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
| 25 | |
/* Convert a generic log item back to its containing inode log item. */
static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}
| 30 | |
Christoph Hellwig | ce9641d | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 31 | STATIC void |
| 32 | xfs_inode_item_data_fork_size( |
| 33 | struct xfs_inode_log_item *iip, |
| 34 | int *nvecs, |
| 35 | int *nbytes) |
| 36 | { |
| 37 | struct xfs_inode *ip = iip->ili_inode; |
| 38 | |
Christoph Hellwig | f7e67b2 | 2020-05-18 10:28:05 -0700 | [diff] [blame] | 39 | switch (ip->i_df.if_format) { |
Christoph Hellwig | ce9641d | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 40 | case XFS_DINODE_FMT_EXTENTS: |
| 41 | if ((iip->ili_fields & XFS_ILOG_DEXT) && |
Christoph Hellwig | daf8396 | 2020-05-18 10:27:22 -0700 | [diff] [blame] | 42 | ip->i_df.if_nextents > 0 && |
Christoph Hellwig | ce9641d | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 43 | ip->i_df.if_bytes > 0) { |
| 44 | /* worst case, doesn't subtract delalloc extents */ |
| 45 | *nbytes += XFS_IFORK_DSIZE(ip); |
| 46 | *nvecs += 1; |
| 47 | } |
| 48 | break; |
| 49 | case XFS_DINODE_FMT_BTREE: |
| 50 | if ((iip->ili_fields & XFS_ILOG_DBROOT) && |
| 51 | ip->i_df.if_broot_bytes > 0) { |
| 52 | *nbytes += ip->i_df.if_broot_bytes; |
| 53 | *nvecs += 1; |
| 54 | } |
| 55 | break; |
| 56 | case XFS_DINODE_FMT_LOCAL: |
| 57 | if ((iip->ili_fields & XFS_ILOG_DDATA) && |
| 58 | ip->i_df.if_bytes > 0) { |
| 59 | *nbytes += roundup(ip->i_df.if_bytes, 4); |
| 60 | *nvecs += 1; |
| 61 | } |
| 62 | break; |
| 63 | |
| 64 | case XFS_DINODE_FMT_DEV: |
Christoph Hellwig | ce9641d | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 65 | break; |
| 66 | default: |
| 67 | ASSERT(0); |
| 68 | break; |
| 69 | } |
| 70 | } |
| 71 | |
| 72 | STATIC void |
| 73 | xfs_inode_item_attr_fork_size( |
| 74 | struct xfs_inode_log_item *iip, |
| 75 | int *nvecs, |
| 76 | int *nbytes) |
| 77 | { |
| 78 | struct xfs_inode *ip = iip->ili_inode; |
| 79 | |
Christoph Hellwig | f7e67b2 | 2020-05-18 10:28:05 -0700 | [diff] [blame] | 80 | switch (ip->i_afp->if_format) { |
Christoph Hellwig | ce9641d | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 81 | case XFS_DINODE_FMT_EXTENTS: |
| 82 | if ((iip->ili_fields & XFS_ILOG_AEXT) && |
Christoph Hellwig | daf8396 | 2020-05-18 10:27:22 -0700 | [diff] [blame] | 83 | ip->i_afp->if_nextents > 0 && |
Christoph Hellwig | ce9641d | 2013-12-13 11:00:43 +1100 | [diff] [blame] | 84 | ip->i_afp->if_bytes > 0) { |
| 85 | /* worst case, doesn't subtract unused space */ |
| 86 | *nbytes += XFS_IFORK_ASIZE(ip); |
| 87 | *nvecs += 1; |
| 88 | } |
| 89 | break; |
| 90 | case XFS_DINODE_FMT_BTREE: |
| 91 | if ((iip->ili_fields & XFS_ILOG_ABROOT) && |
| 92 | ip->i_afp->if_broot_bytes > 0) { |
| 93 | *nbytes += ip->i_afp->if_broot_bytes; |
| 94 | *nvecs += 1; |
| 95 | } |
| 96 | break; |
| 97 | case XFS_DINODE_FMT_LOCAL: |
| 98 | if ((iip->ili_fields & XFS_ILOG_ADATA) && |
| 99 | ip->i_afp->if_bytes > 0) { |
| 100 | *nbytes += roundup(ip->i_afp->if_bytes, 4); |
| 101 | *nvecs += 1; |
| 102 | } |
| 103 | break; |
| 104 | default: |
| 105 | ASSERT(0); |
| 106 | break; |
| 107 | } |
| 108 | } |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 109 | |
/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	/* the format structure and the inode core are always logged */
	*nvecs += 2;
	*nbytes += sizeof(struct xfs_inode_log_format) +
		   xfs_log_dinode_size(ip->i_mount);

	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
	/* the attr fork only contributes when the inode actually has one */
	if (XFS_IFORK_Q(ip))
		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}
| 134 | |
/*
 * Format the data fork of the inode into the log vector.  Depending on the
 * fork format this emits at most one extra iovec (extent list, btree root
 * or inline data) and records its size in ilf->ilf_dsize.  Dirty bits in
 * iip->ili_fields that cannot apply to the current fork format are cleared
 * here, as is the relevant bit itself when the fork turns out to have
 * nothing to log.
 */
STATIC void
xfs_inode_item_format_data_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* these bits are meaningless for an extent-format fork */
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_df) > 0);

			/* copy the extents into the iovec in on-disk form */
			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ASSERT(data_bytes <= ip->i_df.if_bytes);

			ilf->ilf_dsize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			/* log the incore btree root verbatim */
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT,
					ip->i_df.if_broot,
					ip->i_df.if_broot_bytes);
			ilf->ilf_dsize = ip->i_df.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_d.di_size > 0);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
					ip->i_df.if_u1.if_data, data_bytes);
			ilf->ilf_dsize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
		/* the rdev travels inside the format structure, no iovec */
		if (iip->ili_fields & XFS_ILOG_DEV)
			ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
		break;
	default:
		ASSERT(0);
		break;
	}
}
| 219 | |
/*
 * Format the attribute fork of the inode into the log vector.  Mirrors
 * xfs_inode_item_format_data_fork(): at most one extra iovec is emitted and
 * its size recorded in ilf->ilf_asize, and attr dirty bits that do not apply
 * to the current fork format (or to an empty fork) are cleared from
 * iip->ili_fields.
 */
STATIC void
xfs_inode_item_format_attr_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* these bits are meaningless for an extent-format fork */
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(ip->i_afp) ==
				ip->i_afp->if_nextents);

			/* copy the extents into the iovec in on-disk form */
			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ilf->ilf_asize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			/* log the incore btree root verbatim */
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
					ip->i_afp->if_broot,
					ip->i_afp->if_broot_bytes);
			ilf->ilf_asize = ip->i_afp->if_broot_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT(ip->i_afp->if_u1.if_data != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
					ip->i_afp->if_u1.if_data,
					data_bytes);
			ilf->ilf_asize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}
| 297 | |
/*
 * Convert an incore timestamp to a log timestamp.  Note that the log format
 * specifies host endian format!
 *
 * Bigtime inodes encode the whole timespec into the single integer value;
 * legacy inodes overlay a split seconds/nanoseconds layout onto the same
 * storage via the xfs_legacy_ictimestamp view.
 */
static inline xfs_ictimestamp_t
xfs_inode_to_log_dinode_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_ictimestamp	*lits;
	xfs_ictimestamp_t		its;

	if (xfs_inode_has_bigtime(ip))
		return xfs_inode_encode_bigtime(tv);

	/* legacy layout: write sec/nsec through the overlay view of 'its' */
	lits = (struct xfs_legacy_ictimestamp *)&its;
	lits->t_sec = tv.tv_sec;
	lits->t_nsec = tv.tv_nsec;

	return its;
}
| 319 | |
Christoph Hellwig | 9b3beb0 | 2021-03-29 11:11:38 -0700 | [diff] [blame] | 320 | /* |
| 321 | * The legacy DMAPI fields are only present in the on-disk and in-log inodes, |
| 322 | * but not in the in-memory one. But we are guaranteed to have an inode buffer |
| 323 | * in memory when logging an inode, so we can just copy it from the on-disk |
| 324 | * inode to the in-log inode here so that recovery of file system with these |
| 325 | * fields set to non-zero values doesn't lose them. For all other cases we zero |
| 326 | * the fields. |
| 327 | */ |
| 328 | static void |
| 329 | xfs_copy_dm_fields_to_log_dinode( |
| 330 | struct xfs_inode *ip, |
| 331 | struct xfs_log_dinode *to) |
| 332 | { |
| 333 | struct xfs_dinode *dip; |
| 334 | |
| 335 | dip = xfs_buf_offset(ip->i_itemp->ili_item.li_buf, |
| 336 | ip->i_imap.im_boffset); |
| 337 | |
| 338 | if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) { |
| 339 | to->di_dmevmask = be32_to_cpu(dip->di_dmevmask); |
| 340 | to->di_dmstate = be16_to_cpu(dip->di_dmstate); |
| 341 | } else { |
| 342 | to->di_dmevmask = 0; |
| 343 | to->di_dmstate = 0; |
| 344 | } |
| 345 | } |
| 346 | |
/*
 * Fill in the in-log dinode from the incore inode.  Stable fields come from
 * the xfs_icdinode, while fields the VFS owns (uid/gid, timestamps, nlink,
 * generation, mode) are read from the VFS inode.  Padding is zeroed so no
 * uninitialised memory leaks into the log.
 */
static void
xfs_inode_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = XFS_DINODE_MAGIC;
	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = i_uid_read(inode);
	to->di_gid = i_gid_read(inode);
	/* split the 32-bit incore project id into the lo/hi on-disk halves */
	to->di_projid_lo = ip->i_projid & 0xffff;
	to->di_projid_hi = ip->i_projid >> 16;

	memset(to->di_pad, 0, sizeof(to->di_pad));
	memset(to->di_pad3, 0, sizeof(to->di_pad3));
	to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode->i_ctime);
	to->di_nlink = inode->i_nlink;
	to->di_gen = inode->i_generation;
	to->di_mode = inode->i_mode;

	to->di_size = from->di_size;
	to->di_nblocks = from->di_nblocks;
	to->di_extsize = from->di_extsize;
	to->di_nextents = xfs_ifork_nextents(&ip->i_df);
	to->di_anextents = xfs_ifork_nextents(ip->i_afp);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_flags = from->di_flags;

	xfs_copy_dm_fields_to_log_dinode(ip, to);

	/* log a dummy value to ensure log structure is fully initialised */
	to->di_next_unlinked = NULLAGINO;

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		/* v3 inodes carry extra identity and CRC-era metadata */
		to->di_version = 3;
		to->di_changecount = inode_peek_iversion(inode);
		to->di_crtime = xfs_inode_to_log_dinode_ts(ip, from->di_crtime);
		to->di_flags2 = from->di_flags2;
		to->di_cowextsize = from->di_cowextsize;
		to->di_ino = ip->i_ino;
		to->di_lsn = lsn;
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = from->di_flushiter;
	}
}
| 402 | |
/*
 * Format the inode core. Current timestamp data is only in the VFS inode
 * fields, so we need to grab them from there. Hence rather than just copying
 * the XFS inode core structure, format the fields directly into the iovec.
 */
static void
xfs_inode_item_format_core(
	struct xfs_inode	*ip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_log_dinode	*dic;

	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
	/* the item's current LSN is logged as part of the core */
	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}
| 420 | |
/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item. It fills the first item with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attributes
 * data/extents/b-tree root.
 *
 * Note: Always use the 64 bit inode log format structure so we don't
 * leave an uninitialised hole in the format item on 64 bit systems. Log
 * recovery on 32 bit systems handles this just fine, so there's no reason
 * for not using an initialising the properly padded structure all the time.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_inode_log_format *ilf;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */

	/*
	 * make sure we don't leak uninitialised data into the log in the case
	 * when we don't log every field in the inode.
	 */
	ilf->ilf_dsize = 0;
	ilf->ilf_asize = 0;
	ilf->ilf_pad = 0;
	memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));

	xlog_finish_iovec(lv, vecp, sizeof(*ilf));

	xfs_inode_item_format_core(ip, lv, &vecp);
	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (XFS_IFORK_Q(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		/* no attr fork: drop any stale attr dirty bits */
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}
| 475 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 476 | /* |
| 477 | * This is called to pin the inode associated with the inode log |
Christoph Hellwig | a14a5ab | 2010-02-18 12:43:22 +0000 | [diff] [blame] | 478 | * item in memory so it cannot be written out. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 479 | */ |
| 480 | STATIC void |
| 481 | xfs_inode_item_pin( |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 482 | struct xfs_log_item *lip) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 483 | { |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 484 | struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; |
Christoph Hellwig | a14a5ab | 2010-02-18 12:43:22 +0000 | [diff] [blame] | 485 | |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 486 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 487 | ASSERT(lip->li_buf); |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 488 | |
| 489 | trace_xfs_inode_pin(ip, _RET_IP_); |
| 490 | atomic_inc(&ip->i_pincount); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 491 | } |
| 492 | |
| 493 | |
| 494 | /* |
| 495 | * This is called to unpin the inode associated with the inode log |
| 496 | * item which was previously pinned with a call to xfs_inode_item_pin(). |
Christoph Hellwig | a14a5ab | 2010-02-18 12:43:22 +0000 | [diff] [blame] | 497 | * |
| 498 | * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0. |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 499 | * |
| 500 | * Note that unpin can race with inode cluster buffer freeing marking the buffer |
| 501 | * stale. In that case, flush completions are run from the buffer unpin call, |
| 502 | * which may happen before the inode is unpinned. If we lose the race, there |
| 503 | * will be no buffer attached to the log item, but the inode will be marked |
| 504 | * XFS_ISTALE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 505 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 506 | STATIC void |
| 507 | xfs_inode_item_unpin( |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 508 | struct xfs_log_item *lip, |
Christoph Hellwig | 9412e31 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 509 | int remove) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 510 | { |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 511 | struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; |
Christoph Hellwig | a14a5ab | 2010-02-18 12:43:22 +0000 | [diff] [blame] | 512 | |
Dave Chinner | 4aaf15d | 2010-03-08 11:24:07 +1100 | [diff] [blame] | 513 | trace_xfs_inode_unpin(ip, _RET_IP_); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 514 | ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE)); |
Christoph Hellwig | a14a5ab | 2010-02-18 12:43:22 +0000 | [diff] [blame] | 515 | ASSERT(atomic_read(&ip->i_pincount) > 0); |
| 516 | if (atomic_dec_and_test(&ip->i_pincount)) |
Christoph Hellwig | f392e63 | 2011-12-18 20:00:10 +0000 | [diff] [blame] | 517 | wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 518 | } |
| 519 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 520 | STATIC uint |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 521 | xfs_inode_item_push( |
| 522 | struct xfs_log_item *lip, |
| 523 | struct list_head *buffer_list) |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 524 | __releases(&lip->li_ailp->ail_lock) |
| 525 | __acquires(&lip->li_ailp->ail_lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 526 | { |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 527 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
| 528 | struct xfs_inode *ip = iip->ili_inode; |
Carlos Maiolino | d3a304b | 2017-08-08 18:21:50 -0700 | [diff] [blame] | 529 | struct xfs_buf *bp = lip->li_buf; |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 530 | uint rval = XFS_ITEM_SUCCESS; |
| 531 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 532 | |
Dave Chinner | 90c60e1 | 2020-06-29 14:49:19 -0700 | [diff] [blame] | 533 | ASSERT(iip->ili_item.li_buf); |
| 534 | |
| 535 | if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) || |
| 536 | (ip->i_flags & XFS_ISTALE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 537 | return XFS_ITEM_PINNED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 538 | |
Dave Chinner | 718ecc5 | 2020-08-17 16:41:01 -0700 | [diff] [blame] | 539 | if (xfs_iflags_test(ip, XFS_IFLUSHING)) |
Dave Chinner | 90c60e1 | 2020-06-29 14:49:19 -0700 | [diff] [blame] | 540 | return XFS_ITEM_FLUSHING; |
| 541 | |
| 542 | if (!xfs_buf_trylock(bp)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 543 | return XFS_ITEM_LOCKED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 544 | |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 545 | spin_unlock(&lip->li_ailp->ail_lock); |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 546 | |
Dave Chinner | 90c60e1 | 2020-06-29 14:49:19 -0700 | [diff] [blame] | 547 | /* |
| 548 | * We need to hold a reference for flushing the cluster buffer as it may |
| 549 | * fail the buffer without IO submission. In which case, we better get a |
| 550 | * reference for that completion because otherwise we don't get a |
| 551 | * reference for IO until we queue the buffer for delwri submission. |
| 552 | */ |
| 553 | xfs_buf_hold(bp); |
Dave Chinner | 5717ea4 | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 554 | error = xfs_iflush_cluster(bp); |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 555 | if (!error) { |
| 556 | if (!xfs_buf_delwri_queue(bp, buffer_list)) |
| 557 | rval = XFS_ITEM_FLUSHING; |
| 558 | xfs_buf_relse(bp); |
Dave Chinner | 90c60e1 | 2020-06-29 14:49:19 -0700 | [diff] [blame] | 559 | } else { |
Dave Chinner | 5717ea4 | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 560 | /* |
| 561 | * Release the buffer if we were unable to flush anything. On |
| 562 | * any other error, the buffer has already been released. |
| 563 | */ |
| 564 | if (error == -EAGAIN) |
| 565 | xfs_buf_relse(bp); |
Brian Foster | d4bc4c5 | 2020-03-27 08:29:55 -0700 | [diff] [blame] | 566 | rval = XFS_ITEM_LOCKED; |
Dave Chinner | 90c60e1 | 2020-06-29 14:49:19 -0700 | [diff] [blame] | 567 | } |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 568 | |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 569 | spin_lock(&lip->li_ailp->ail_lock); |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 570 | return rval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 571 | } |
| 572 | |
| 573 | /* |
| 574 | * Unlock the inode associated with the inode log item. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 575 | */ |
| 576 | STATIC void |
Christoph Hellwig | ddf9205 | 2019-06-28 19:27:32 -0700 | [diff] [blame] | 577 | xfs_inode_item_release( |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 578 | struct xfs_log_item *lip) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 579 | { |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 580 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
| 581 | struct xfs_inode *ip = iip->ili_inode; |
Christoph Hellwig | 898621d | 2010-06-24 11:36:58 +1000 | [diff] [blame] | 582 | unsigned short lock_flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 583 | |
Christoph Hellwig | f3ca8738 | 2011-07-08 14:34:47 +0200 | [diff] [blame] | 584 | ASSERT(ip->i_itemp != NULL); |
| 585 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 586 | |
Christoph Hellwig | 898621d | 2010-06-24 11:36:58 +1000 | [diff] [blame] | 587 | lock_flags = iip->ili_lock_flags; |
| 588 | iip->ili_lock_flags = 0; |
Christoph Hellwig | ddc3415 | 2011-09-19 15:00:54 +0000 | [diff] [blame] | 589 | if (lock_flags) |
Christoph Hellwig | f3ca8738 | 2011-07-08 14:34:47 +0200 | [diff] [blame] | 590 | xfs_iunlock(ip, lock_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 591 | } |
| 592 | |
| 593 | /* |
Dave Chinner | de25c18 | 2010-11-30 15:15:46 +1100 | [diff] [blame] | 594 | * This is called to find out where the oldest active copy of the inode log |
| 595 | * item in the on disk log resides now that the last log write of it completed |
| 596 | * at the given lsn. Since we always re-log all dirty data in an inode, the |
| 597 | * latest copy in the on disk log is the only one that matters. Therefore, |
| 598 | * simply return the given lsn. |
| 599 | * |
| 600 | * If the inode has been marked stale because the cluster is being freed, we |
| 601 | * don't want to (re-)insert this inode into the AIL. There is a race condition |
| 602 | * where the cluster buffer may be unpinned before the inode is inserted into |
| 603 | * the AIL during transaction committed processing. If the buffer is unpinned |
| 604 | * before the inode item has been committed and inserted, then it is possible |
Dave Chinner | 1316d4d | 2011-07-04 05:27:36 +0000 | [diff] [blame] | 605 | * for the buffer to be written and IO completes before the inode is inserted |
Dave Chinner | de25c18 | 2010-11-30 15:15:46 +1100 | [diff] [blame] | 606 | * into the AIL. In that case, we'd be inserting a clean, stale inode into the |
| 607 | * AIL which will never get removed. It will, however, get reclaimed which |
| 608 | * triggers an assert in xfs_inode_free() complaining about freein an inode |
| 609 | * still in the AIL. |
| 610 | * |
Dave Chinner | 1316d4d | 2011-07-04 05:27:36 +0000 | [diff] [blame] | 611 | * To avoid this, just unpin the inode directly and return a LSN of -1 so the |
| 612 | * transaction committed code knows that it does not need to do any further |
| 613 | * processing on the item. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 614 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 615 | STATIC xfs_lsn_t |
| 616 | xfs_inode_item_committed( |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 617 | struct xfs_log_item *lip, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 618 | xfs_lsn_t lsn) |
| 619 | { |
Dave Chinner | de25c18 | 2010-11-30 15:15:46 +1100 | [diff] [blame] | 620 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
| 621 | struct xfs_inode *ip = iip->ili_inode; |
| 622 | |
Dave Chinner | 1316d4d | 2011-07-04 05:27:36 +0000 | [diff] [blame] | 623 | if (xfs_iflags_test(ip, XFS_ISTALE)) { |
| 624 | xfs_inode_item_unpin(lip, 0); |
| 625 | return -1; |
| 626 | } |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 627 | return lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 628 | } |
| 629 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 630 | STATIC void |
| 631 | xfs_inode_item_committing( |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 632 | struct xfs_log_item *lip, |
Christoph Hellwig | ddf9205 | 2019-06-28 19:27:32 -0700 | [diff] [blame] | 633 | xfs_lsn_t commit_lsn) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 634 | { |
Christoph Hellwig | ddf9205 | 2019-06-28 19:27:32 -0700 | [diff] [blame] | 635 | INODE_ITEM(lip)->ili_last_lsn = commit_lsn; |
| 636 | return xfs_inode_item_release(lip); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 637 | } |
| 638 | |
Christoph Hellwig | 272e42b | 2011-10-28 09:54:24 +0000 | [diff] [blame] | 639 | static const struct xfs_item_ops xfs_inode_item_ops = { |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 640 | .iop_size = xfs_inode_item_size, |
| 641 | .iop_format = xfs_inode_item_format, |
| 642 | .iop_pin = xfs_inode_item_pin, |
| 643 | .iop_unpin = xfs_inode_item_unpin, |
Christoph Hellwig | ddf9205 | 2019-06-28 19:27:32 -0700 | [diff] [blame] | 644 | .iop_release = xfs_inode_item_release, |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 645 | .iop_committed = xfs_inode_item_committed, |
| 646 | .iop_push = xfs_inode_item_push, |
Christoph Hellwig | ddf9205 | 2019-06-28 19:27:32 -0700 | [diff] [blame] | 647 | .iop_committing = xfs_inode_item_committing, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 648 | }; |
| 649 | |
| 650 | |
| 651 | /* |
| 652 | * Initialize the inode log item for a newly allocated (in-core) inode. |
| 653 | */ |
| 654 | void |
| 655 | xfs_inode_item_init( |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 656 | struct xfs_inode *ip, |
| 657 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 658 | { |
Christoph Hellwig | 7bfa31d | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 659 | struct xfs_inode_log_item *iip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 660 | |
| 661 | ASSERT(ip->i_itemp == NULL); |
Carlos Maiolino | 32a2b11 | 2020-07-22 09:23:10 -0700 | [diff] [blame] | 662 | iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_zone, |
| 663 | GFP_KERNEL | __GFP_NOFAIL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 664 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 665 | iip->ili_inode = ip; |
Dave Chinner | 1319ebe | 2020-06-29 14:48:46 -0700 | [diff] [blame] | 666 | spin_lock_init(&iip->ili_lock); |
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 667 | xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE, |
| 668 | &xfs_inode_item_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 669 | } |
| 670 | |
| 671 | /* |
| 672 | * Free the inode log item and any memory hanging off of it. |
| 673 | */ |
| 674 | void |
| 675 | xfs_inode_item_destroy( |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 676 | struct xfs_inode *ip) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 677 | { |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 678 | struct xfs_inode_log_item *iip = ip->i_itemp; |
| 679 | |
| 680 | ASSERT(iip->ili_item.li_buf == NULL); |
| 681 | |
| 682 | ip->i_itemp = NULL; |
| 683 | kmem_free(iip->ili_item.li_lv_shadow); |
| 684 | kmem_cache_free(xfs_ili_zone, iip); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 685 | } |
| 686 | |
| 687 | |
| 688 | /* |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 689 | * We only want to pull the item from the AIL if it is actually there |
| 690 | * and its location in the log has not changed since we started the |
| 691 | * flush. Thus, we only bother if the inode's lsn has not changed. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 692 | */ |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 693 | static void |
| 694 | xfs_iflush_ail_updates( |
| 695 | struct xfs_ail *ailp, |
| 696 | struct list_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 697 | { |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 698 | struct xfs_log_item *lip; |
| 699 | xfs_lsn_t tail_lsn = 0; |
| 700 | |
| 701 | /* this is an opencoded batch version of xfs_trans_ail_delete */ |
| 702 | spin_lock(&ailp->ail_lock); |
| 703 | list_for_each_entry(lip, list, li_bio_list) { |
| 704 | xfs_lsn_t lsn; |
| 705 | |
| 706 | clear_bit(XFS_LI_FAILED, &lip->li_flags); |
| 707 | if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn) |
| 708 | continue; |
| 709 | |
| 710 | lsn = xfs_ail_delete_one(ailp, lip); |
| 711 | if (!tail_lsn && lsn) |
| 712 | tail_lsn = lsn; |
| 713 | } |
| 714 | xfs_ail_update_finish(ailp, tail_lsn); |
| 715 | } |
| 716 | |
| 717 | /* |
| 718 | * Walk the list of inodes that have completed their IOs. If they are clean |
| 719 | * remove them from the list and dissociate them from the buffer. Buffers that |
| 720 | * are still dirty remain linked to the buffer and on the list. Caller must |
| 721 | * handle them appropriately. |
| 722 | */ |
| 723 | static void |
| 724 | xfs_iflush_finish( |
| 725 | struct xfs_buf *bp, |
| 726 | struct list_head *list) |
| 727 | { |
Dave Chinner | aac855a | 2020-06-29 14:48:48 -0700 | [diff] [blame] | 728 | struct xfs_log_item *lip, *n; |
Dave Chinner | 3013683 | 2010-12-20 12:03:17 +1100 | [diff] [blame] | 729 | |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 730 | list_for_each_entry_safe(lip, n, list, li_bio_list) { |
| 731 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 732 | bool drop_buffer = false; |
| 733 | |
Dave Chinner | 1319ebe | 2020-06-29 14:48:46 -0700 | [diff] [blame] | 734 | spin_lock(&iip->ili_lock); |
Dave Chinner | 1319ebe | 2020-06-29 14:48:46 -0700 | [diff] [blame] | 735 | |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 736 | /* |
| 737 | * Remove the reference to the cluster buffer if the inode is |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 738 | * clean in memory and drop the buffer reference once we've |
| 739 | * dropped the locks we hold. |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 740 | */ |
| 741 | ASSERT(iip->ili_item.li_buf == bp); |
| 742 | if (!iip->ili_fields) { |
| 743 | iip->ili_item.li_buf = NULL; |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 744 | list_del_init(&lip->li_bio_list); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 745 | drop_buffer = true; |
| 746 | } |
| 747 | iip->ili_last_fields = 0; |
| 748 | iip->ili_flush_lsn = 0; |
| 749 | spin_unlock(&iip->ili_lock); |
Dave Chinner | 718ecc5 | 2020-08-17 16:41:01 -0700 | [diff] [blame] | 750 | xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 751 | if (drop_buffer) |
| 752 | xfs_buf_rele(bp); |
Dave Chinner | 3013683 | 2010-12-20 12:03:17 +1100 | [diff] [blame] | 753 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 754 | } |
| 755 | |
| 756 | /* |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 757 | * Inode buffer IO completion routine. It is responsible for removing inodes |
Dave Chinner | 718ecc5 | 2020-08-17 16:41:01 -0700 | [diff] [blame] | 758 | * attached to the buffer from the AIL if they have not been re-logged and |
| 759 | * completing the inode flush. |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 760 | */ |
| 761 | void |
Christoph Hellwig | 664ffb8 | 2020-09-01 10:55:29 -0700 | [diff] [blame] | 762 | xfs_buf_inode_iodone( |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 763 | struct xfs_buf *bp) |
| 764 | { |
| 765 | struct xfs_log_item *lip, *n; |
| 766 | LIST_HEAD(flushed_inodes); |
| 767 | LIST_HEAD(ail_updates); |
| 768 | |
| 769 | /* |
| 770 | * Pull the attached inodes from the buffer one at a time and take the |
| 771 | * appropriate action on them. |
| 772 | */ |
| 773 | list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { |
| 774 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
| 775 | |
| 776 | if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) { |
| 777 | xfs_iflush_abort(iip->ili_inode); |
| 778 | continue; |
| 779 | } |
| 780 | if (!iip->ili_last_fields) |
| 781 | continue; |
| 782 | |
| 783 | /* Do an unlocked check for needing the AIL lock. */ |
| 784 | if (iip->ili_flush_lsn == lip->li_lsn || |
| 785 | test_bit(XFS_LI_FAILED, &lip->li_flags)) |
| 786 | list_move_tail(&lip->li_bio_list, &ail_updates); |
| 787 | else |
| 788 | list_move_tail(&lip->li_bio_list, &flushed_inodes); |
| 789 | } |
| 790 | |
| 791 | if (!list_empty(&ail_updates)) { |
| 792 | xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates); |
| 793 | list_splice_tail(&ail_updates, &flushed_inodes); |
| 794 | } |
| 795 | |
| 796 | xfs_iflush_finish(bp, &flushed_inodes); |
| 797 | if (!list_empty(&flushed_inodes)) |
| 798 | list_splice_tail(&flushed_inodes, &bp->b_li_list); |
| 799 | } |
| 800 | |
Christoph Hellwig | 664ffb8 | 2020-09-01 10:55:29 -0700 | [diff] [blame] | 801 | void |
| 802 | xfs_buf_inode_io_fail( |
| 803 | struct xfs_buf *bp) |
| 804 | { |
| 805 | struct xfs_log_item *lip; |
| 806 | |
| 807 | list_for_each_entry(lip, &bp->b_li_list, li_bio_list) |
| 808 | set_bit(XFS_LI_FAILED, &lip->li_flags); |
| 809 | } |
| 810 | |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 811 | /* |
Dave Chinner | 718ecc5 | 2020-08-17 16:41:01 -0700 | [diff] [blame] | 812 | * This is the inode flushing abort routine. It is called when |
Dave Chinner | 04913fd | 2012-04-23 15:58:41 +1000 | [diff] [blame] | 813 | * the filesystem is shutting down to clean up the inode state. It is |
| 814 | * responsible for removing the inode item from the AIL if it has not been |
Dave Chinner | 718ecc5 | 2020-08-17 16:41:01 -0700 | [diff] [blame] | 815 | * re-logged and clearing the inode's flush state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 816 | */ |
| 817 | void |
| 818 | xfs_iflush_abort( |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 819 | struct xfs_inode *ip) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 820 | { |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 821 | struct xfs_inode_log_item *iip = ip->i_itemp; |
| 822 | struct xfs_buf *bp = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 823 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 824 | if (iip) { |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 825 | /* |
| 826 | * Clear the failed bit before removing the item from the AIL so |
| 827 | * xfs_trans_ail_delete() doesn't try to clear and release the |
| 828 | * buffer attached to the log item before we are done with it. |
| 829 | */ |
| 830 | clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags); |
Brian Foster | 2b3cf09 | 2020-05-06 13:27:04 -0700 | [diff] [blame] | 831 | xfs_trans_ail_delete(&iip->ili_item, 0); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 832 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 833 | /* |
| 834 | * Clear the inode logging fields so no more flushes are |
| 835 | * attempted. |
| 836 | */ |
Dave Chinner | 1319ebe | 2020-06-29 14:48:46 -0700 | [diff] [blame] | 837 | spin_lock(&iip->ili_lock); |
Dave Chinner | 1dfde68 | 2020-06-29 14:48:45 -0700 | [diff] [blame] | 838 | iip->ili_last_fields = 0; |
Christoph Hellwig | f5d8d5c | 2012-02-29 09:53:54 +0000 | [diff] [blame] | 839 | iip->ili_fields = 0; |
Dave Chinner | fc0561c | 2015-11-03 13:14:59 +1100 | [diff] [blame] | 840 | iip->ili_fsync_fields = 0; |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 841 | iip->ili_flush_lsn = 0; |
| 842 | bp = iip->ili_item.li_buf; |
| 843 | iip->ili_item.li_buf = NULL; |
Dave Chinner | 48d55e2 | 2020-06-29 14:49:18 -0700 | [diff] [blame] | 844 | list_del_init(&iip->ili_item.li_bio_list); |
Dave Chinner | 1319ebe | 2020-06-29 14:48:46 -0700 | [diff] [blame] | 845 | spin_unlock(&iip->ili_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 846 | } |
Dave Chinner | 718ecc5 | 2020-08-17 16:41:01 -0700 | [diff] [blame] | 847 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
Dave Chinner | 298f7be | 2020-06-29 14:49:15 -0700 | [diff] [blame] | 848 | if (bp) |
| 849 | xfs_buf_rele(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 850 | } |
| 851 | |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 852 | /* |
Dave Chinner | 20413e37 | 2017-10-09 11:37:22 -0700 | [diff] [blame] | 853 | * convert an xfs_inode_log_format struct from the old 32 bit version |
| 854 | * (which can have different field alignments) to the native 64 bit version |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 855 | */ |
| 856 | int |
| 857 | xfs_inode_item_format_convert( |
Dave Chinner | 20413e37 | 2017-10-09 11:37:22 -0700 | [diff] [blame] | 858 | struct xfs_log_iovec *buf, |
| 859 | struct xfs_inode_log_format *in_f) |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 860 | { |
Dave Chinner | 20413e37 | 2017-10-09 11:37:22 -0700 | [diff] [blame] | 861 | struct xfs_inode_log_format_32 *in_f32 = buf->i_addr; |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 862 | |
Darrick J. Wong | a5155b8 | 2019-11-02 09:40:53 -0700 | [diff] [blame] | 863 | if (buf->i_len != sizeof(*in_f32)) { |
| 864 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); |
Dave Chinner | 20413e37 | 2017-10-09 11:37:22 -0700 | [diff] [blame] | 865 | return -EFSCORRUPTED; |
Darrick J. Wong | a5155b8 | 2019-11-02 09:40:53 -0700 | [diff] [blame] | 866 | } |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 867 | |
Dave Chinner | 20413e37 | 2017-10-09 11:37:22 -0700 | [diff] [blame] | 868 | in_f->ilf_type = in_f32->ilf_type; |
| 869 | in_f->ilf_size = in_f32->ilf_size; |
| 870 | in_f->ilf_fields = in_f32->ilf_fields; |
| 871 | in_f->ilf_asize = in_f32->ilf_asize; |
| 872 | in_f->ilf_dsize = in_f32->ilf_dsize; |
| 873 | in_f->ilf_ino = in_f32->ilf_ino; |
Christoph Hellwig | 42b67dc | 2017-10-19 11:07:09 -0700 | [diff] [blame] | 874 | memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u)); |
Dave Chinner | 20413e37 | 2017-10-09 11:37:22 -0700 | [diff] [blame] | 875 | in_f->ilf_blkno = in_f32->ilf_blkno; |
| 876 | in_f->ilf_len = in_f32->ilf_len; |
| 877 | in_f->ilf_boffset = in_f32->ilf_boffset; |
| 878 | return 0; |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 879 | } |