// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_error.h"

#include <linux/iversion.h>

struct kmem_cache	*xfs_ili_cache;		/* inode log item */

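/* Convert a generic log item back to its containing inode log item. */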
static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}

/*
 * The logged size of an inode fork is always the current size of the inode
 * fork. This means that when an inode fork is relogged, the size of the logged
 * region is determined by the current state, not the combination of the
 * previously logged state + the current state. This relogging behaviour
 * differs from most other log items, which retain the size of the previously
 * logged changes when smaller regions are relogged.
 *
 * Hence, for operations that remove data from the inode fork (e.g. shortform
 * dir/attr remove, extent format extent removal, etc.), the size of the
 * relogged inode gets -smaller- rather than staying the same as the
 * previously logged size, and this can result in the committing transaction
 * reducing the amount of space being consumed by the CIL.
 */
STATIC void
xfs_inode_item_data_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			/* worst case, doesn't subtract delalloc extents */
			*nbytes += XFS_IFORK_DSIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			*nbytes += ip->i_df.if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			*nbytes += roundup(ip->i_df.if_bytes, 4);
			*nvecs += 1;
		}
		break;

	case XFS_DINODE_FMT_DEV:
		break;
	default:
		ASSERT(0);
		break;
	}
}

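/*
 * Calculate the number of log iovecs and bytes needed to log the attr fork
 * of the inode, based on the attr fork format and which of its regions are
 * dirty in the log item.
 */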
STATIC void
xfs_inode_item_attr_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			/* worst case, doesn't subtract unused space */
			*nbytes += XFS_IFORK_ASIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			*nbytes += ip->i_afp->if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			*nbytes += roundup(ip->i_afp->if_bytes, 4);
			*nvecs += 1;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	*nvecs += 2;
	*nbytes += sizeof(struct xfs_inode_log_format) +
		   xfs_log_dinode_size(ip->i_mount);

	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
	if (XFS_IFORK_Q(ip))
		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}

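/*
 * Format the data fork of the inode into the log vector. Dirty region flags
 * that cannot apply to the current fork format are cleared from ili_fields
 * so the format structure only describes what was actually copied.
 */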
STATIC void
xfs_inode_item_format_data_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_df) > 0);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ASSERT(data_bytes <= ip->i_df.if_bytes);

			ilf->ilf_dsize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT,
					ip->i_df.if_broot,
					ip->i_df.if_broot_bytes);
			ilf->ilf_dsize = ip->i_df.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			/*
			 * Round if_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_disk_size > 0);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
					ip->i_df.if_u1.if_data, data_bytes);
			ilf->ilf_dsize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
		if (iip->ili_fields & XFS_ILOG_DEV)
			ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
		break;
	default:
		ASSERT(0);
		break;
	}
}

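/*
 * Format the attr fork of the inode into the log vector. As with the data
 * fork, dirty region flags that do not match the current attr fork format
 * are cleared from ili_fields.
 */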
STATIC void
xfs_inode_item_format_attr_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(ip->i_afp) ==
			       ip->i_afp->if_nextents);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ilf->ilf_asize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
					ip->i_afp->if_broot,
					ip->i_afp->if_broot_bytes);
			ilf->ilf_asize = ip->i_afp->if_broot_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			/*
			 * Round if_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT(ip->i_afp->if_u1.if_data != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
					ip->i_afp->if_u1.if_data,
					data_bytes);
			ilf->ilf_asize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Convert an incore timestamp to a log timestamp. Note that the log format
 * specifies host endian format!
 */
static inline xfs_log_timestamp_t
xfs_inode_to_log_dinode_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_log_legacy_timestamp	*lits;
	xfs_log_timestamp_t		its;

	if (xfs_inode_has_bigtime(ip))
		return xfs_inode_encode_bigtime(tv);

	lits = (struct xfs_log_legacy_timestamp *)&its;
	lits->t_sec = tv.tv_sec;
	lits->t_nsec = tv.tv_nsec;

	return its;
}

/*
 * The legacy DMAPI fields are only present in the on-disk and in-log inodes,
 * but not in the in-memory one. However, we are guaranteed to have an inode
 * buffer in memory when logging an inode, so we can just copy them from the
 * on-disk inode to the in-log inode here so that recovery of a filesystem
 * with these fields set to non-zero values doesn't lose them. For all other
 * cases we zero the fields.
 */
static void
xfs_copy_dm_fields_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to)
{
	struct xfs_dinode	*dip;

	dip = xfs_buf_offset(ip->i_itemp->ili_item.li_buf,
			     ip->i_imap.im_boffset);

	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) {
		to->di_dmevmask = be32_to_cpu(dip->di_dmevmask);
		to->di_dmstate = be16_to_cpu(dip->di_dmstate);
	} else {
		to->di_dmevmask = 0;
		to->di_dmstate = 0;
	}
}

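/*
 * Fill in the log dinode from the incore inode and its VFS inode, emitting
 * either the v2 or v3 on-disk format depending on the filesystem features.
 */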
static void
xfs_inode_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct inode		*inode = VFS_I(ip);

	to->di_magic = XFS_DINODE_MAGIC;
	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = i_uid_read(inode);
	to->di_gid = i_gid_read(inode);
	to->di_projid_lo = ip->i_projid & 0xffff;
	to->di_projid_hi = ip->i_projid >> 16;

	memset(to->di_pad, 0, sizeof(to->di_pad));
	memset(to->di_pad3, 0, sizeof(to->di_pad3));
	to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode->i_ctime);
	to->di_nlink = inode->i_nlink;
	to->di_gen = inode->i_generation;
	to->di_mode = inode->i_mode;

	to->di_size = ip->i_disk_size;
	to->di_nblocks = ip->i_nblocks;
	to->di_extsize = ip->i_extsize;
	to->di_nextents = xfs_ifork_nextents(&ip->i_df);
	to->di_anextents = xfs_ifork_nextents(ip->i_afp);
	to->di_forkoff = ip->i_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_flags = ip->i_diflags;

	xfs_copy_dm_fields_to_log_dinode(ip, to);

	/* log a dummy value to ensure log structure is fully initialised */
	to->di_next_unlinked = NULLAGINO;

	if (xfs_has_v3inodes(ip->i_mount)) {
		to->di_version = 3;
		to->di_changecount = inode_peek_iversion(inode);
		to->di_crtime = xfs_inode_to_log_dinode_ts(ip, ip->i_crtime);
		to->di_flags2 = ip->i_diflags2;
		to->di_cowextsize = ip->i_cowextsize;
		to->di_ino = ip->i_ino;
		to->di_lsn = lsn;
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = ip->i_flushiter;
	}
}

/*
 * Format the inode core. Current timestamp data is only in the VFS inode
 * fields, so we need to grab them from there. Hence rather than just copying
 * the XFS inode core structure, format the fields directly into the iovec.
 */
static void
xfs_inode_item_format_core(
	struct xfs_inode	*ip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_log_dinode	*dic;

	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}

/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item. It fills the first iovec with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attribute
 * data/extents/b-tree root.
 *
 * Note: Always use the 64 bit inode log format structure so we don't
 * leave an uninitialised hole in the format item on 64 bit systems. Log
 * recovery on 32 bit systems handles this just fine, so there's no reason
 * for not using and initialising the properly padded structure all the
 * time.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_inode_log_format *ilf;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */

	/*
	 * Make sure we don't leak uninitialised data into the log in the case
	 * when we don't log every field in the inode.
	 */
	ilf->ilf_dsize = 0;
	ilf->ilf_asize = 0;
	ilf->ilf_pad = 0;
	memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));

	xlog_finish_iovec(lv, vecp, sizeof(*ilf));

	xfs_inode_item_format_core(ip, lv, &vecp);
	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (XFS_IFORK_Q(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(lip->li_buf);

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}


/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 *
| 513 | * Note that unpin can race with inode cluster buffer freeing marking the buffer |
| 514 | * stale. In that case, flush completions are run from the buffer unpin call, |
| 515 | * which may happen before the inode is unpinned. If we lose the race, there |
| 516 | * will be no buffer attached to the log item, but the inode will be marked |
| 517 | * XFS_ISTALE. |
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}

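/*
 * Attempt to write the inode's cluster buffer back via the delwri list. The
 * push is abandoned if the inode is pinned, stale, locked or already being
 * flushed; otherwise the cluster is flushed and the buffer is queued for
 * delwri submission.
 */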
STATIC uint
xfs_inode_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	ASSERT(iip->ili_item.li_buf);

	if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
	    (ip->i_flags & XFS_ISTALE))
		return XFS_ITEM_PINNED;

	if (xfs_iflags_test(ip, XFS_IFLUSHING))
		return XFS_ITEM_FLUSHING;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	spin_unlock(&lip->li_ailp->ail_lock);

	/*
	 * We need to hold a reference for flushing the cluster buffer as it
	 * may fail the buffer without IO submission. In that case we need a
	 * reference for the completion, because otherwise we don't get a
	 * reference for IO until we queue the buffer for delwri submission.
	 */
	xfs_buf_hold(bp);
	error = xfs_iflush_cluster(bp);
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	} else {
		/*
		 * Release the buffer if we were unable to flush anything. On
		 * any other error, the buffer has already been released.
		 */
		if (error == -EAGAIN)
			xfs_buf_relse(bp);
		rval = XFS_ITEM_LOCKED;
	}

	spin_lock(&lip->li_ailp->ail_lock);
	return rval;
}

/*
 * Unlock the inode associated with the inode log item.
 */
STATIC void
xfs_inode_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	unsigned short		lock_flags;

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	lock_flags = iip->ili_lock_flags;
	iip->ili_lock_flags = 0;
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn. Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters. Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO completes before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed, which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return an LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_inode_item_unpin(lip, 0);
		return -1;
	}
	return lsn;
}

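/*
 * Remember the CIL sequence this inode item was committed in, then release
 * the transaction's hold on the inode.
 */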
STATIC void
xfs_inode_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	INODE_ITEM(lip)->ili_commit_seq = seq;
	return xfs_inode_item_release(lip);
}

static const struct xfs_item_ops xfs_inode_item_ops = {
	.iop_size	= xfs_inode_item_size,
	.iop_format	= xfs_inode_item_format,
	.iop_pin	= xfs_inode_item_pin,
	.iop_unpin	= xfs_inode_item_unpin,
	.iop_release	= xfs_inode_item_release,
	.iop_committed	= xfs_inode_item_committed,
	.iop_push	= xfs_inode_item_push,
	.iop_committing	= xfs_inode_item_committing,
};


/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 */
void
xfs_inode_item_init(
	struct xfs_inode	*ip,
	struct xfs_mount	*mp)
{
	struct xfs_inode_log_item *iip;

	ASSERT(ip->i_itemp == NULL);
	iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_cache,
					      GFP_KERNEL | __GFP_NOFAIL);

	iip->ili_inode = ip;
	spin_lock_init(&iip->ili_lock);
	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
			  &xfs_inode_item_ops);
}

/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;

	ASSERT(iip->ili_item.li_buf == NULL);

	ip->i_itemp = NULL;
	kmem_free(iip->ili_item.li_lv_shadow);
	kmem_cache_free(xfs_ili_cache, iip);
}


| 701 | /* |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 702 | * We only want to pull the item from the AIL if it is actually there |
| 703 | * and its location in the log has not changed since we started the |
| 704 | * flush. Thus, we only bother if the inode's lsn has not changed. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 705 | */ |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 706 | static void |
| 707 | xfs_iflush_ail_updates( |
| 708 | struct xfs_ail *ailp, |
| 709 | struct list_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 710 | { |
Dave Chinner | a69a1dc | 2020-06-29 14:49:20 -0700 | [diff] [blame] | 711 | struct xfs_log_item *lip; |
| 712 | xfs_lsn_t tail_lsn = 0; |
| 713 | |
| 714 | /* this is an opencoded batch version of xfs_trans_ail_delete */ |
| 715 | spin_lock(&ailp->ail_lock); |
| 716 | list_for_each_entry(lip, list, li_bio_list) { |
| 717 | xfs_lsn_t lsn; |
| 718 | |
| 719 | clear_bit(XFS_LI_FAILED, &lip->li_flags); |
| 720 | if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn) |
| 721 | continue; |
| 722 | |
| 723 | lsn = xfs_ail_delete_one(ailp, lip); |
| 724 | if (!tail_lsn && lsn) |
| 725 | tail_lsn = lsn; |
| 726 | } |
| 727 | xfs_ail_update_finish(ailp, tail_lsn); |
| 728 | } |
| 729 | |
/*
 * Walk the list of inodes that have completed their IOs. If they are clean,
 * remove them from the list and dissociate them from the buffer. Inodes that
 * are still dirty remain linked to the buffer and on the list. The caller
 * must handle them appropriately.
 */
static void
xfs_iflush_finish(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);
		bool	drop_buffer = false;

		spin_lock(&iip->ili_lock);

		/*
		 * Remove the reference to the cluster buffer if the inode is
		 * clean in memory and drop the buffer reference once we've
		 * dropped the locks we hold.
		 */
		ASSERT(iip->ili_item.li_buf == bp);
		if (!iip->ili_fields) {
			iip->ili_item.li_buf = NULL;
			list_del_init(&lip->li_bio_list);
			drop_buffer = true;
		}
		iip->ili_last_fields = 0;
		iip->ili_flush_lsn = 0;
		spin_unlock(&iip->ili_lock);
		xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING);
		if (drop_buffer)
			xfs_buf_rele(bp);
	}
}

/*
 * Inode buffer IO completion routine. It is responsible for removing inodes
 * attached to the buffer from the AIL if they have not been re-logged and
 * completing the inode flush.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip, *n;
	LIST_HEAD(flushed_inodes);
	LIST_HEAD(ail_updates);

	/*
	 * Pull the attached inodes from the buffer one at a time and take the
	 * appropriate action on them.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);

		if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
			xfs_iflush_abort(iip->ili_inode);
			continue;
		}
		if (!iip->ili_last_fields)
			continue;

		/* Do an unlocked check for needing the AIL lock. */
		if (iip->ili_flush_lsn == lip->li_lsn ||
		    test_bit(XFS_LI_FAILED, &lip->li_flags))
			list_move_tail(&lip->li_bio_list, &ail_updates);
		else
			list_move_tail(&lip->li_bio_list, &flushed_inodes);
	}

	if (!list_empty(&ail_updates)) {
		xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates);
		list_splice_tail(&ail_updates, &flushed_inodes);
	}

	xfs_iflush_finish(bp, &flushed_inodes);
	if (!list_empty(&flushed_inodes))
		list_splice_tail(&flushed_inodes, &bp->b_li_list);
}

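/*
 * The buffer write failed: mark every inode log item attached to the buffer
 * as failed so the failure is noticed the next time the items are pushed.
 */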
void
xfs_buf_inode_io_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		set_bit(XFS_LI_FAILED, &lip->li_flags);
}

/*
 * This is the inode flushing abort routine. It is called when
 * the filesystem is shutting down to clean up the inode state. It is
 * responsible for removing the inode item from the AIL if it has not been
 * re-logged and clearing the inode's flush state.
 */
void
xfs_iflush_abort(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_buf		*bp = NULL;

	if (iip) {
		/*
		 * Clear the failed bit before removing the item from the AIL
		 * so xfs_trans_ail_delete() doesn't try to clear and release
		 * the buffer attached to the log item before we are done
		 * with it.
		 */
		clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
		xfs_trans_ail_delete(&iip->ili_item, 0);

		/*
		 * Clear the inode logging fields so no more flushes are
		 * attempted.
		 */
		spin_lock(&iip->ili_lock);
		iip->ili_last_fields = 0;
		iip->ili_fields = 0;
		iip->ili_fsync_fields = 0;
		iip->ili_flush_lsn = 0;
		bp = iip->ili_item.li_buf;
		iip->ili_item.li_buf = NULL;
		list_del_init(&iip->ili_item.li_bio_list);
		spin_unlock(&iip->ili_lock);
	}
	xfs_iflags_clear(ip, XFS_IFLUSHING);
	if (bp)
		xfs_buf_rele(bp);
}

/*
 * Convert an xfs_inode_log_format struct from the old 32 bit version
 * (which can have different field alignments) to the native 64 bit version.
 */
int
xfs_inode_item_format_convert(
	struct xfs_log_iovec		*buf,
	struct xfs_inode_log_format	*in_f)
{
	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;

	if (buf->i_len != sizeof(*in_f32)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	in_f->ilf_type = in_f32->ilf_type;
	in_f->ilf_size = in_f32->ilf_size;
	in_f->ilf_fields = in_f32->ilf_fields;
	in_f->ilf_asize = in_f32->ilf_asize;
	in_f->ilf_dsize = in_f32->ilf_dsize;
	in_f->ilf_ino = in_f32->ilf_ino;
	memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u));
	in_f->ilf_blkno = in_f32->ilf_blkno;
	in_f->ilf_len = in_f32->ilf_len;
	in_f->ilf_boffset = in_f32->ilf_boffset;
	return 0;
}