// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"
#include "xfs_health.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(
	struct xlog_in_core	*iclog);
STATIC void xlog_state_do_callback(
	struct xlog		*log);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

static int
xfs_log_cover(struct xfs_mount *);

static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}
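
/*
 * A note on the encoding the two helpers above rely on (see the
 * xlog_crack_grant_head_val()/xlog_assign_grant_head_val() helpers in
 * xfs_log_priv.h): a grant head packs the log cycle number into the upper
 * 32 bits and the byte offset into the lower 32 bits of one 64-bit value,
 * so both can be updated together by a single atomic64_cmpxchg():
 *
 *	val   = ((int64_t)cycle << 32) | space;
 *	cycle = val >> 32;
 *	space = val & 0xffffffff;
 *
 * Worked example: with l_logsize = 1000 bytes, subtracting 300 bytes from a
 * head of (cycle 2, space 100) wraps backwards to (cycle 1, space 800).
 */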

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;
	bool			woken_task = false;

	list_for_each_entry(tic, &head->waiters, t_queue) {

		/*
		 * There is a chance that the size of the CIL checkpoints in
		 * progress at the last AIL push target calculation resulted in
		 * limiting the target to the log head (l_last_sync_lsn) at the
		 * time. This may not reflect where the log head is now as the
		 * CIL checkpoints may have completed.
		 *
		 * Hence when we are woken here, it may be the head of the log
		 * that has moved rather than the tail. As the tail didn't
		 * move, there still won't be space available for the
		 * reservation we require. However, if the AIL has already
		 * pushed to the target defined by the old log head location,
		 * we will hang here waiting for something else to update the
		 * AIL push target.
		 *
		 * Therefore, if there isn't space to wake the first waiter on
		 * the grant head, we need to push the AIL again to ensure the
		 * target reflects both the current log tail and log head
		 * position before we wait for the tail to move again.
		 */

		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes) {
			if (!woken_task)
				xlog_grant_push_ail(log, need_bytes);
			return false;
		}

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
		woken_task = true;
	}

	return true;
}
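
/*
 * A worked example of the wakeup loop above (illustrative numbers only):
 * with *free_bytes = 1000 and three queued waiters needing 600, 300 and 300
 * bytes, the first two are woken (leaving 100 free) and the walk stops at
 * the third, pushing the AIL for its 300 bytes and returning false. Waiters
 * are woken strictly in queue order, so a large reservation at the head of
 * the queue cannot be starved by smaller reservations behind it.
 */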

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (xlog_is_shutdown(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (xlog_is_shutdown(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}
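
/*
 * The sleep in xlog_grant_head_wait() uses the standard lost-wakeup-safe
 * idiom: the task state is set to TASK_UNINTERRUPTIBLE *before* head->lock
 * is dropped, so a wake_up_process() from xlog_grant_head_wake() that lands
 * between the unlock and schedule() simply makes schedule() return
 * immediately instead of the wakeup being lost:
 *
 *	__set_current_state(TASK_UNINTERRUPTIBLE);
 *	spin_unlock(&head->lock);
 *	schedule();			// no-op if already woken
 *	spin_lock(&head->lock);
 */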

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!xlog_in_recovery(log));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters; if we do not wake
	 * up all the waiters, go to sleep waiting for more free space.
	 * Otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}
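
/*
 * The shape of that lock free fast path, reduced to a sketch (not the
 * literal control flow above): list_empty_careful() may be used without
 * head->lock because a racing answer only costs us a trip through the
 * locked slow path, and our own ticket cannot be on the list yet:
 *
 *	if (list_empty_careful(&head->waiters) &&
 *	    xlog_space_left(log, &head->grant) >= need_bytes)
 *		return 0;		// lock never taken
 *	spin_lock(&head->lock);		// slow path: wake and/or wait
 */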

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

bool
xfs_log_writable(
	struct xfs_mount	*mp)
{
	/*
	 * Do not write to the log on norecovery mounts, if the data or log
	 * devices are read-only, or if the filesystem is shutdown. Read-only
	 * mounts allow internal writes for log recovery and unmount purposes,
	 * so don't restrict that case.
	 */
	if (xfs_has_norecovery(mp))
		return false;
	if (xfs_readonly_buftarg(mp->m_ddev_targp))
		return false;
	if (xfs_readonly_buftarg(mp->m_log->l_targ))
		return false;
	if (xlog_is_shutdown(mp->m_log))
		return false;
	return true;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (xlog_is_shutdown(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
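
/*
 * For context: the expected caller of xfs_log_regrant() is the transaction
 * roll path, where one permanent ticket is reused across a chain of
 * transactions. A rough sketch of that sequence (simplified, not the literal
 * xfs_trans_roll() code):
 *
 *	commit the current transaction but keep its log ticket;
 *	xfs_log_regrant(mp, tic);	// top up t_curr_res, bump t_tid
 *
 * which is why the TID is bumped above rather than a new ticket being
 * allocated for each transaction in the chain.
 */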

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over-allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (xlog_is_shutdown(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent);
	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Run all the pending iclog callbacks and wake log force waiters and iclog
 * space waiters so they can process the newly set shutdown state. We really
 * don't care what order we process callbacks here because the log is shut down
 * and so state cannot change on disk anymore.
 *
 * We avoid processing actively referenced iclogs so that we don't run callbacks
 * while the iclog owner might still be preparing the iclog for IO submission.
 * These will be caught by xlog_state_release_iclog(), which will call this
 * function again to process any callbacks that may have been added to that
 * iclog.
 */
static void
xlog_state_shutdown_callbacks(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog;
	LIST_HEAD(cb_list);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	do {
		if (atomic_read(&iclog->ic_refcnt)) {
			/* Reference holder will re-run iclog callbacks. */
			continue;
		}
		list_splice_init(&iclog->ic_callbacks, &cb_list);
		wake_up_all(&iclog->ic_write_wait);
		wake_up_all(&iclog->ic_force_wait);
	} while ((iclog = iclog->ic_next) != log->l_iclog);

	wake_up_all(&log->l_flush_wait);
	spin_unlock(&log->l_icloglock);

	xlog_cil_process_committed(&cb_list);
}
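
/*
 * Note the idiom above: callbacks are spliced onto the local cb_list while
 * l_icloglock is held, but only run by xlog_cil_process_committed() after
 * the lock has been dropped, so callback processing can block or take other
 * locks without holding up the icloglock. A minimal sketch of the pattern:
 *
 *	LIST_HEAD(tmp);
 *	spin_lock(&lock);
 *	list_splice_init(&shared_list, &tmp);
 *	spin_unlock(&lock);
 *	process(&tmp);		// safe to sleep or take other locks here
 */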

/*
 * Flush iclog to disk if this is the last reference to the given iclog and it
 * is in the WANT_SYNC state.
 *
 * If the caller passes in a non-zero @old_tail_lsn and the current log tail
 * does not match, there may be metadata on disk that must be persisted before
 * this iclog is written. To satisfy that requirement, set the
 * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
 * log tail value.
 *
 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
 * written to stable storage, and implies that a commit record is contained
 * within the iclog. We need to ensure that the log tail does not move beyond
 * the tail that the first commit record in the iclog ordered against, otherwise
 * correct recovery of that checkpoint becomes dependent on future operations
 * performed on this iclog.
 *
 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
 * current tail into iclog. Once the iclog tail is set, future operations must
 * not modify it, otherwise they potentially violate ordering constraints for
 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
 * the iclog will get zeroed on activation of the iclog after sync, so we
 * always capture the tail lsn on the iclog on the first NEED_FUA release
 * regardless of the number of active reference counts on this iclog.
 */
int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		old_tail_lsn)
{
	xfs_lsn_t		tail_lsn;
	bool			last_ref;

	lockdep_assert_held(&log->l_icloglock);

	trace_xlog_iclog_release(iclog, _RET_IP_);
	/*
	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
	 * of the tail LSN into the iclog so we guarantee that the log tail does
	 * not move between deciding if a cache flush is required and writing
	 * the LSN into the iclog below.
	 */
	if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		tail_lsn = xlog_assign_tail_lsn(log->l_mp);

		if (old_tail_lsn && tail_lsn != old_tail_lsn)
			iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;

		if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
		    !iclog->ic_header.h_tail_lsn)
			iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	}

	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);

	if (xlog_is_shutdown(log)) {
		/*
		 * If there are no more references to this iclog, process the
		 * pending iclog callbacks that were waiting on the release of
		 * this iclog.
		 */
		if (last_ref) {
			spin_unlock(&log->l_icloglock);
			xlog_state_shutdown_callbacks(log);
			spin_lock(&log->l_icloglock);
		}
		return -EIO;
	}

	if (!last_ref)
		return 0;

	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		return 0;
	}

	iclog->ic_state = XLOG_STATE_SYNCING;
	if (!iclog->ic_header.h_tail_lsn)
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	xlog_verify_tail_lsn(log, iclog);
	trace_xlog_iclog_syncing(iclog, _RET_IP_);

	spin_unlock(&log->l_icloglock);
	xlog_sync(log, iclog);
	spin_lock(&log->l_icloglock);
	return 0;
}
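
/*
 * For reference, the iclog lifecycle the release path above participates in,
 * as a rough summary (states are defined in xfs_log_priv.h):
 *
 *	ACTIVE -> WANT_SYNC -> SYNCING -> DONE_SYNC -> CALLBACK -> DIRTY
 *	   ^                                                         |
 *	   +---------------------------------------------------------+
 *
 * xlog_state_release_iclog() performs the WANT_SYNC -> SYNCING transition on
 * the final reference drop and hands the iclog to xlog_sync(); IO completion
 * then drives it through the remaining states back to ACTIVE.
 */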

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	struct xlog	*log;
	bool		fatal = xfs_has_crc(mp);
	int		error = 0;
	int		min_logfsbs;

	if (!xfs_has_norecovery(mp)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(log)) {
		error = PTR_ERR(log);
		goto out;
	}
	mp->m_log = log;

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, which would lead to unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails. This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5, or whenever bad
		 * metadata would otherwise lead to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now that we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	log->l_ailp = mp->m_ail;

	/*
	 * Skip log recovery on a norecovery mount. Pretend it all just
	 * worked.
	 */
	if (!xfs_has_norecovery(mp)) {
		bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(log);
out:
	return error;
}

/*
 * Finish the recovery of the file system. This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
	int			error = 0;

	if (xfs_has_norecovery(mp)) {
		ASSERT(readonly);
		return 0;
	} else if (readonly) {
		/* Allow unlinked processing to proceed */
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
	}

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes. Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure. We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	if (xlog_recovery_needed(log))
		error = xlog_recover_finish(log);
	if (!error)
		xfs_log_work_queue(mp);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery. This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (xlog_recovery_needed(log)) {
		if (!error) {
			xfs_log_force(mp, XFS_LOG_SYNC);
			xfs_ail_push_all_sync(mp->m_ail);
		}
		xfs_notice(mp, "Ending recovery (logdev: %s)",
			   mp->m_logname ? mp->m_logname : "internal");
	} else {
		xfs_info(mp, "Ending clean mount");
	}
	xfs_buftarg_drain(mp->m_ddev_targp);

	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
	if (readonly)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	/* Make sure the log is dead if we're returning failure. */
	ASSERT(!error || xlog_is_shutdown(log));

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
void
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);
}

/*
 * Flush out the iclog to disk ensuring that device caches are flushed and
 * the iclog hits stable storage before any completion waiters are woken.
 */
static inline int
xlog_force_iclog(
	struct xlog_in_core	*iclog)
{
	atomic_inc(&iclog->ic_refcnt);
	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
	if (iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
	return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
}

/*
 * Wait for the iclog and all prior iclogs to be written to disk as required by
 * the log force state machine. Waiting on ic_force_wait ensures iclog
 * completions have been ordered and callbacks run before we are woken here,
 * hence guaranteeing that all the iclogs up to this one are on stable storage.
 */
int
xlog_wait_on_iclog(
	struct xlog_in_core	*iclog)
		__releases(iclog->ic_log->l_icloglock)
{
	struct xlog	*log = iclog->ic_log;

	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
	if (!xlog_is_shutdown(log) &&
	    iclog->ic_state != XLOG_STATE_ACTIVE &&
	    iclog->ic_state != XLOG_STATE_DIRTY) {
		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	} else {
		spin_unlock(&log->l_icloglock);
	}

	if (xlog_is_shutdown(log))
		return -EIO;
	return 0;
}
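
/*
 * Callers follow the pattern visible in xlog_unmount_write() below: the
 * iclog is sampled and forced while l_icloglock is held, and
 * xlog_wait_on_iclog() consumes (releases) that lock whether or not it
 * sleeps:
 *
 *	spin_lock(&log->l_icloglock);
 *	iclog = log->l_iclog;
 *	error = xlog_force_iclog(iclog);
 *	xlog_wait_on_iclog(iclog);	// drops l_icloglock
 */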
| 904 | |
| 905 | /* |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 906 | * Write out an unmount record using the ticket provided. We have to account for |
| 907 | * the data space used in the unmount ticket as this write is not done from a |
| 908 | * transaction context that has already done the accounting for us. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | */ |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 910 | static int |
| 911 | xlog_write_unmount_record( |
| 912 | struct xlog *log, |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 913 | struct xlog_ticket *ticket) |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 914 | { |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 915 | struct xfs_unmount_log_format ulf = { |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 916 | .magic = XLOG_UNMOUNT_TYPE, |
| 917 | }; |
| 918 | struct xfs_log_iovec reg = { |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 919 | .i_addr = &ulf, |
| 920 | .i_len = sizeof(ulf), |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 921 | .i_type = XLOG_REG_TYPE_UNMOUNT, |
| 922 | }; |
| 923 | struct xfs_log_vec vec = { |
| 924 | .lv_niovecs = 1, |
| 925 | .lv_iovecp = ®, |
| 926 | }; |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 927 | |
| 928 | /* account for space used by record data */ |
| 929 | ticket->t_curr_res -= sizeof(ulf); |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 930 | |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 931 | return xlog_write(log, NULL, &vec, ticket, XLOG_UNMOUNT_TRANS); |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 932 | } |
| 933 | |
| 934 | /* |
| 935 | * Mark the filesystem clean by writing an unmount record to the head of the |
| 936 | * log. |
| 937 | */ |
| 938 | static void |
| 939 | xlog_unmount_write( |
| 940 | struct xlog *log) |
| 941 | { |
| 942 | struct xfs_mount *mp = log->l_mp; |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 943 | struct xlog_in_core *iclog; |
| 944 | struct xlog_ticket *tic = NULL; |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 945 | int error; |
| 946 | |
| 947 | error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0); |
| 948 | if (error) |
| 949 | goto out_err; |
| 950 | |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 951 | error = xlog_write_unmount_record(log, tic); |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 952 | /* |
| 953 | * At this point, we're unmounting anyway, so there's no point in
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 954 | * transitioning log state to shutdown. Just continue... |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 955 | */ |
| 956 | out_err: |
| 957 | if (error) |
| 958 | xfs_alert(mp, "%s: unmount record failed", __func__); |
| 959 | |
| 960 | spin_lock(&log->l_icloglock); |
| 961 | iclog = log->l_iclog; |
Dave Chinner | 45eddb4 | 2021-07-27 16:23:48 -0700 | [diff] [blame] | 962 | error = xlog_force_iclog(iclog); |
Christoph Hellwig | 81e5b50 | 2020-03-20 08:49:18 -0700 | [diff] [blame] | 963 | xlog_wait_on_iclog(iclog); |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 964 | |
| 965 | if (tic) { |
| 966 | trace_xfs_log_umount_write(log, tic); |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 967 | xfs_log_ticket_ungrant(log, tic); |
Darrick J. Wong | 53235f2 | 2018-07-20 09:28:39 -0700 | [diff] [blame] | 968 | } |
| 969 | } |
| 970 | |
Christoph Hellwig | 13859c9 | 2020-03-12 16:52:51 -0700 | [diff] [blame] | 971 | static void |
| 972 | xfs_log_unmount_verify_iclog( |
| 973 | struct xlog *log) |
| 974 | { |
| 975 | struct xlog_in_core *iclog = log->l_iclog; |
| 976 | |
| 977 | do { |
| 978 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); |
| 979 | ASSERT(iclog->ic_offset == 0); |
| 980 | } while ((iclog = iclog->ic_next) != log->l_iclog); |
| 981 | } |
| 982 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | /* |
| 984 | * The unmount record used to have a string "Unmount filesystem--" in the
| 985 | * data section, where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
| 986 | * We just write the magic number now since that particular field isn't
Zhi Yong Wu | 8e159e7 | 2013-08-12 03:15:00 +0000 | [diff] [blame] | 987 | * currently architecture converted and the "Unmount" string is a bit arbitrary.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 | * As far as I know, there weren't any dependencies on the old behaviour. |
| 989 | */ |
Christoph Hellwig | 550319e | 2020-03-12 16:52:50 -0700 | [diff] [blame] | 990 | static void |
Christoph Hellwig | 13859c9 | 2020-03-12 16:52:51 -0700 | [diff] [blame] | 991 | xfs_log_unmount_write( |
| 992 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | { |
Christoph Hellwig | 13859c9 | 2020-03-12 16:52:51 -0700 | [diff] [blame] | 994 | struct xlog *log = mp->m_log; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | |
Brian Foster | 50d2548 | 2021-01-22 16:48:20 -0800 | [diff] [blame] | 996 | if (!xfs_log_writable(mp)) |
Christoph Hellwig | 550319e | 2020-03-12 16:52:50 -0700 | [diff] [blame] | 997 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | |
Christoph Hellwig | 550319e | 2020-03-12 16:52:50 -0700 | [diff] [blame] | 999 | xfs_log_force(mp, XFS_LOG_SYNC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | |
Dave Chinner | 2039a27 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 1001 | if (xlog_is_shutdown(log)) |
Christoph Hellwig | 6178d10 | 2020-03-12 16:52:51 -0700 | [diff] [blame] | 1002 | return; |
Darrick J. Wong | 5cc3c00 | 2020-03-26 10:26:44 -0700 | [diff] [blame] | 1003 | |
| 1004 | /* |
| 1005 | * If we think the summary counters are bad, avoid writing the unmount |
| 1006 | * record to force log recovery at next mount, after which the summary |
| 1007 | * counters will be recalculated. Refer to xlog_check_unmount_rec for |
| 1008 | * more details. |
| 1009 | */ |
| 1010 | if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp, |
| 1011 | XFS_ERRTAG_FORCE_SUMMARY_RECALC)) { |
| 1012 | xfs_alert(mp, "%s: will fix summary counters at next mount", |
| 1013 | __func__); |
| 1014 | return; |
| 1015 | } |
| 1016 | |
Christoph Hellwig | 13859c9 | 2020-03-12 16:52:51 -0700 | [diff] [blame] | 1017 | xfs_log_unmount_verify_iclog(log); |
Dave Chinner | 3c702f9 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 1018 | xlog_unmount_write(log); |
Christoph Hellwig | 550319e | 2020-03-12 16:52:50 -0700 | [diff] [blame] | 1019 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | |
| 1021 | /* |
Dave Chinner | c75921a | 2012-10-08 21:56:08 +1100 | [diff] [blame] | 1022 | * Empty the log for unmount/freeze. |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1023 | * |
| 1024 | * To do this, we first need to shut down the background log work so it is not |
| 1025 | * trying to cover the log as we clean up. We then need to unpin all objects in |
| 1026 | * the log so we can flush them out. Once they have completed their IO and
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1027 | * run the callbacks removing themselves from the AIL, we can cover the log. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1028 | */ |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1029 | int |
Dave Chinner | c75921a | 2012-10-08 21:56:08 +1100 | [diff] [blame] | 1030 | xfs_log_quiesce( |
| 1031 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | { |
Darrick J. Wong | 908ce71 | 2021-08-08 08:27:12 -0700 | [diff] [blame] | 1033 | /* |
| 1034 | * Clear log incompat features since we're quiescing the log. Report |
| 1035 | * failures, though it's not fatal to have a higher log feature |
| 1036 | * protection level than the log contents actually require. |
| 1037 | */ |
| 1038 | if (xfs_clear_incompat_log_features(mp)) { |
| 1039 | int error; |
| 1040 | |
| 1041 | error = xfs_sync_sb(mp, false); |
| 1042 | if (error) |
| 1043 | xfs_warn(mp, |
| 1044 | "Failed to clear log incompat features on quiesce"); |
| 1045 | } |
| 1046 | |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1047 | cancel_delayed_work_sync(&mp->m_log->l_work); |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1048 | xfs_log_force(mp, XFS_LOG_SYNC); |
| 1049 | |
| 1050 | /* |
| 1051 | * The superblock buffer is uncached and while xfs_ail_push_all_sync() |
Brian Foster | 8321ddb | 2021-01-22 16:48:20 -0800 | [diff] [blame] | 1052 | * will push it, xfs_buftarg_wait() will not wait for it. Further, |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1053 | * xfs_buf_iowait() cannot be used because it was pushed with the |
| 1054 | * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for |
| 1055 | * the IO to complete. |
| 1056 | */ |
| 1057 | xfs_ail_push_all_sync(mp->m_ail); |
Brian Foster | 8321ddb | 2021-01-22 16:48:20 -0800 | [diff] [blame] | 1058 | xfs_buftarg_wait(mp->m_ddev_targp); |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1059 | xfs_buf_lock(mp->m_sb_bp); |
| 1060 | xfs_buf_unlock(mp->m_sb_bp); |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1061 | |
| 1062 | return xfs_log_cover(mp); |
Brian Foster | 9e54ee0 | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1063 | } |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1064 | |
Brian Foster | 9e54ee0 | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1065 | void |
| 1066 | xfs_log_clean( |
| 1067 | struct xfs_mount *mp) |
| 1068 | { |
| 1069 | xfs_log_quiesce(mp); |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1070 | xfs_log_unmount_write(mp); |
Dave Chinner | c75921a | 2012-10-08 21:56:08 +1100 | [diff] [blame] | 1071 | } |
| 1072 | |
| 1073 | /* |
| 1074 | * Shut down and release the AIL and Log. |
| 1075 | * |
| 1076 | * During unmount, we need to ensure we flush all the dirty metadata objects |
| 1077 | * from the AIL so that the log is empty before we write the unmount record to |
| 1078 | * the log. Once this is done, we can tear down the AIL and the log. |
| 1079 | */ |
| 1080 | void |
| 1081 | xfs_log_unmount( |
| 1082 | struct xfs_mount *mp) |
| 1083 | { |
Brian Foster | 9e54ee0 | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1084 | xfs_log_clean(mp); |
Dave Chinner | cf2931d | 2012-10-08 21:56:03 +1100 | [diff] [blame] | 1085 | |
Brian Foster | 8321ddb | 2021-01-22 16:48:20 -0800 | [diff] [blame] | 1086 | xfs_buftarg_drain(mp->m_ddev_targp); |
| 1087 | |
David Chinner | 249a8c1 | 2008-02-05 12:13:32 +1100 | [diff] [blame] | 1088 | xfs_trans_ail_destroy(mp); |
Brian Foster | baff4e4 | 2014-07-15 08:07:29 +1000 | [diff] [blame] | 1089 | |
| 1090 | xfs_sysfs_del(&mp->m_log->l_kobj); |
| 1091 | |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 1092 | xlog_dealloc_log(mp->m_log); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | } |
| 1094 | |
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 1095 | void |
| 1096 | xfs_log_item_init( |
| 1097 | struct xfs_mount *mp, |
| 1098 | struct xfs_log_item *item, |
| 1099 | int type, |
Christoph Hellwig | 272e42b | 2011-10-28 09:54:24 +0000 | [diff] [blame] | 1100 | const struct xfs_item_ops *ops) |
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 1101 | { |
| 1102 | item->li_mountp = mp; |
| 1103 | item->li_ailp = mp->m_ail; |
| 1104 | item->li_type = type; |
| 1105 | item->li_ops = ops; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1106 | item->li_lv = NULL; |
| 1107 | |
| 1108 | INIT_LIST_HEAD(&item->li_ail); |
| 1109 | INIT_LIST_HEAD(&item->li_cil); |
Carlos Maiolino | 643c8c0 | 2018-01-24 13:38:49 -0800 | [diff] [blame] | 1110 | INIT_LIST_HEAD(&item->li_bio_list); |
Dave Chinner | e6631f8 | 2018-05-09 07:49:37 -0700 | [diff] [blame] | 1111 | INIT_LIST_HEAD(&item->li_trans); |
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 1112 | } |
| 1113 | |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1114 | /* |
| 1115 | * Wake up processes waiting for log space after we have moved the log tail. |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1116 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | void |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1118 | xfs_log_space_wake( |
Christoph Hellwig | cfb7cdc | 2012-02-20 02:31:23 +0000 | [diff] [blame] | 1119 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1121 | struct xlog *log = mp->m_log; |
Christoph Hellwig | cfb7cdc | 2012-02-20 02:31:23 +0000 | [diff] [blame] | 1122 | int free_bytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | |
Dave Chinner | 2039a27 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 1124 | if (xlog_is_shutdown(log)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1127 | if (!list_empty_careful(&log->l_write_head.waiters)) { |
Dave Chinner | e1d06e5 | 2021-08-10 17:59:02 -0700 | [diff] [blame] | 1128 | ASSERT(!xlog_in_recovery(log)); |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1129 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1130 | spin_lock(&log->l_write_head.lock); |
| 1131 | free_bytes = xlog_space_left(log, &log->l_write_head.grant); |
Christoph Hellwig | e179840d | 2012-02-20 02:31:29 +0000 | [diff] [blame] | 1132 | xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1133 | spin_unlock(&log->l_write_head.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | } |
Dave Chinner | 1054794 | 2010-12-21 12:02:25 +1100 | [diff] [blame] | 1135 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1136 | if (!list_empty_careful(&log->l_reserve_head.waiters)) { |
Dave Chinner | e1d06e5 | 2021-08-10 17:59:02 -0700 | [diff] [blame] | 1137 | ASSERT(!xlog_in_recovery(log)); |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1138 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1139 | spin_lock(&log->l_reserve_head.lock); |
| 1140 | free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); |
Christoph Hellwig | e179840d | 2012-02-20 02:31:29 +0000 | [diff] [blame] | 1141 | xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1142 | spin_unlock(&log->l_reserve_head.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1143 | } |
Dave Chinner | 3f16b98 | 2010-12-21 12:29:01 +1100 | [diff] [blame] | 1144 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | |
| 1146 | /* |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1147 | * Determine if we have a transaction that has gone to disk that needs to be |
| 1148 | * covered. To begin the transition to the idle state, the log first needs to
| 1149 | * be idle. That means the CIL, the AIL and the iclogs need to be empty before
| 1150 | * we start attempting to cover the log. |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1151 | * |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1152 | * Only if we are then in a state where covering is needed, the caller is |
| 1153 | * informed that dummy transactions are required to move the log into the idle |
| 1154 | * state. |
| 1155 | * |
| 1156 | * If there are any items in the AIL or CIL, then we do not want to attempt to
| 1157 | * cover the log as we may be in a situation where there isn't log space |
| 1158 | * available to run a dummy transaction and this can lead to deadlocks when the |
| 1159 | * tail of the log is pinned by an item that is modified in the CIL. Hence |
| 1160 | * there's no point in running a dummy transaction at this point because we |
| 1161 | * can't start trying to idle the log until both the CIL and AIL are empty. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | */ |
Brian Foster | 37444fc | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1163 | static bool |
| 1164 | xfs_log_need_covered( |
| 1165 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | { |
Brian Foster | 37444fc | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1167 | struct xlog *log = mp->m_log; |
| 1168 | bool needed = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1170 | if (!xlog_cil_empty(log)) |
kernel test robot | 8646b98 | 2021-02-10 17:27:31 -0800 | [diff] [blame] | 1171 | return false; |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1172 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 1173 | spin_lock(&log->l_icloglock); |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1174 | switch (log->l_covered_state) { |
| 1175 | case XLOG_STATE_COVER_DONE: |
| 1176 | case XLOG_STATE_COVER_DONE2: |
| 1177 | case XLOG_STATE_COVER_IDLE: |
| 1178 | break; |
| 1179 | case XLOG_STATE_COVER_NEED: |
| 1180 | case XLOG_STATE_COVER_NEED2: |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1181 | if (xfs_ail_min_lsn(log->l_ailp)) |
| 1182 | break; |
| 1183 | if (!xlog_iclogs_empty(log)) |
| 1184 | break; |
| 1185 | |
Brian Foster | 37444fc | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1186 | needed = true; |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1187 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) |
| 1188 | log->l_covered_state = XLOG_STATE_COVER_DONE; |
| 1189 | else |
| 1190 | log->l_covered_state = XLOG_STATE_COVER_DONE2; |
| 1191 | break; |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1192 | default: |
Brian Foster | 37444fc | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1193 | needed = true; |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1194 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | } |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 1196 | spin_unlock(&log->l_icloglock); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1197 | return needed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | } |
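/*
 * For reference, a sketch of the covering state machine driven above (the
 * DONE -> NEED2 and DONE2 -> IDLE transitions happen elsewhere, once the
 * corresponding dummy record has reached disk; any other write drops the
 * state back to NEED):
 *
 *	IDLE --dirty write--> NEED --1st dummy--> DONE --on disk--> NEED2
 *	NEED2 --2nd dummy--> DONE2 --on disk--> IDLE
 */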
| 1199 | |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1200 | /* |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1201 | * Explicitly cover the log. This is similar to background log covering but |
| 1202 | * intended for use in quiesce codepaths. The caller is responsible for ensuring
| 1203 | * the log is idle and suitable for covering. The CIL, iclog buffers and AIL |
| 1204 | * must all be empty. |
| 1205 | */ |
| 1206 | static int |
| 1207 | xfs_log_cover( |
| 1208 | struct xfs_mount *mp) |
| 1209 | { |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1210 | int error = 0; |
Brian Foster | f46e5a1 | 2021-01-22 16:48:23 -0800 | [diff] [blame] | 1211 | bool need_covered; |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1212 | |
Brian Foster | 4533fc6 | 2021-01-26 19:14:55 -0800 | [diff] [blame] | 1213 | ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) && |
| 1214 | !xfs_ail_min_lsn(mp->m_log->l_ailp)) || |
Dave Chinner | 2039a27 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 1215 | xlog_is_shutdown(mp->m_log)); |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1216 | |
| 1217 | if (!xfs_log_writable(mp)) |
| 1218 | return 0; |
| 1219 | |
| 1220 | /* |
Brian Foster | f46e5a1 | 2021-01-22 16:48:23 -0800 | [diff] [blame] | 1221 | * xfs_log_need_covered() is not idempotent because it progresses the |
| 1222 | * state machine if the log requires covering. Therefore, we must call |
| 1223 | * this function once and use the result until we've issued an sb sync. |
| 1224 | * Do so first to make that abundantly clear. |
| 1225 | * |
| 1226 | * Fall into the covering sequence if the log needs covering or the |
| 1227 | * mount has lazy superblock accounting to sync to disk. The sb sync |
| 1228 | * used for covering accumulates the in-core counters, so covering |
| 1229 | * handles this for us. |
| 1230 | */ |
| 1231 | need_covered = xfs_log_need_covered(mp); |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1232 | if (!need_covered && !xfs_has_lazysbcount(mp)) |
Brian Foster | f46e5a1 | 2021-01-22 16:48:23 -0800 | [diff] [blame] | 1233 | return 0; |
| 1234 | |
| 1235 | /* |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1236 | * To cover the log, commit the superblock twice (at most) in |
| 1237 | * independent checkpoints. The first serves as a reference for the |
| 1238 | * tail pointer. The sync transaction and AIL push empties the AIL and |
| 1239 | * updates the in-core tail to the LSN of the first checkpoint. The |
| 1240 | * second commit updates the on-disk tail with the in-core LSN, |
| 1241 | * covering the log. Push the AIL one more time to leave it empty, as |
| 1242 | * we found it. |
| 1243 | */ |
Brian Foster | f46e5a1 | 2021-01-22 16:48:23 -0800 | [diff] [blame] | 1244 | do { |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1245 | error = xfs_sync_sb(mp, true); |
| 1246 | if (error) |
| 1247 | break; |
| 1248 | xfs_ail_push_all_sync(mp->m_ail); |
Brian Foster | f46e5a1 | 2021-01-22 16:48:23 -0800 | [diff] [blame] | 1249 | } while (xfs_log_need_covered(mp)); |
Brian Foster | 303591a | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 1250 | |
| 1251 | return error; |
| 1252 | } |
| 1253 | |
| 1254 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1255 | * We may be holding the log iclog lock upon entering this routine. |
| 1256 | */ |
| 1257 | xfs_lsn_t |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1258 | xlog_assign_tail_lsn_locked( |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1259 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1261 | struct xlog *log = mp->m_log; |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1262 | struct xfs_log_item *lip; |
| 1263 | xfs_lsn_t tail_lsn; |
| 1264 | |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 1265 | assert_spin_locked(&mp->m_ail->ail_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1267 | /* |
| 1268 | * To make sure we always have a valid LSN for the log tail we keep |
| 1269 | * track of the last LSN which was committed in log->l_last_sync_lsn, |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1270 | * and use that when the AIL is empty.
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1271 | */ |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1272 | lip = xfs_ail_min(mp->m_ail); |
| 1273 | if (lip) |
| 1274 | tail_lsn = lip->li_lsn; |
| 1275 | else |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1276 | tail_lsn = atomic64_read(&log->l_last_sync_lsn); |
Dave Chinner | 750b9c9 | 2013-11-01 15:27:18 +1100 | [diff] [blame] | 1277 | trace_xfs_log_assign_tail_lsn(log, tail_lsn); |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1278 | atomic64_set(&log->l_tail_lsn, tail_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | return tail_lsn; |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1280 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1282 | xfs_lsn_t |
| 1283 | xlog_assign_tail_lsn( |
| 1284 | struct xfs_mount *mp) |
| 1285 | { |
| 1286 | xfs_lsn_t tail_lsn; |
| 1287 | |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 1288 | spin_lock(&mp->m_ail->ail_lock); |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1289 | tail_lsn = xlog_assign_tail_lsn_locked(mp); |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 1290 | spin_unlock(&mp->m_ail->ail_lock); |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1291 | |
| 1292 | return tail_lsn; |
| 1293 | } |
| 1294 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | /* |
| 1296 | * Return the space in the log between the tail and the head. The head |
| 1297 | * is passed in the cycle/bytes formal parms. In the special case where |
| 1298 | * the reserve head has wrapped past the tail, this calculation is no
| 1299 | * longer valid. In this case, just return 0 which means there is no space |
| 1300 | * in the log. This works for all places where this function is called |
| 1301 | * with the reserve head. Of course, if the write head were to ever |
| 1302 | * wrap the tail, we should blow up. Rather than catch this case here, |
| 1303 | * we depend on other ASSERTions in other parts of the code. XXXmiken |
| 1304 | * |
Dave Chinner | 2562c32 | 2021-08-10 18:00:41 -0700 | [diff] [blame] | 1305 | * If the reservation head is behind the tail, we have a problem. Warn about it,
| 1306 | * but then treat it as if the log is empty. |
| 1307 | * |
| 1308 | * If the log is shut down, the head and tail may be invalid or out of whack, so |
| 1309 | * shortcut invalidity asserts in this case so that we don't trigger them |
| 1310 | * falsely. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 1312 | STATIC int |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1313 | xlog_space_left( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1314 | struct xlog *log, |
Dave Chinner | c8a09ff | 2010-12-04 00:02:40 +1100 | [diff] [blame] | 1315 | atomic64_t *head) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 | { |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1317 | int tail_bytes; |
| 1318 | int tail_cycle; |
| 1319 | int head_cycle; |
| 1320 | int head_bytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1322 | xlog_crack_grant_head(head, &head_cycle, &head_bytes); |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1323 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); |
| 1324 | tail_bytes = BBTOB(tail_bytes); |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1325 | if (tail_cycle == head_cycle && head_bytes >= tail_bytes) |
Dave Chinner | 2562c32 | 2021-08-10 18:00:41 -0700 | [diff] [blame] | 1326 | return log->l_logsize - (head_bytes - tail_bytes); |
| 1327 | if (tail_cycle + 1 < head_cycle) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | return 0; |
Dave Chinner | 2562c32 | 2021-08-10 18:00:41 -0700 | [diff] [blame] | 1329 | |
| 1330 | /* Ignore potential inconsistency when shutdown. */ |
| 1331 | if (xlog_is_shutdown(log)) |
| 1332 | return log->l_logsize; |
| 1333 | |
| 1334 | if (tail_cycle < head_cycle) { |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1335 | ASSERT(tail_cycle == (head_cycle - 1)); |
Dave Chinner | 2562c32 | 2021-08-10 18:00:41 -0700 | [diff] [blame] | 1336 | return tail_bytes - head_bytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | } |
Dave Chinner | 2562c32 | 2021-08-10 18:00:41 -0700 | [diff] [blame] | 1338 | |
| 1339 | /* |
| 1340 | * The reservation head is behind the tail. In this case we just want to |
| 1341 | * return the size of the log as the amount of space left. |
| 1342 | */ |
| 1343 | xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); |
| 1344 | xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d", |
| 1345 | tail_cycle, tail_bytes); |
| 1346 | xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d", |
| 1347 | head_cycle, head_bytes); |
| 1348 | ASSERT(0); |
| 1349 | return log->l_logsize; |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1350 | } |
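/*
 * A worked example of the arithmetic above (illustrative numbers only):
 * for a 16 MiB log (l_logsize = 16777216) with the tail at cycle 7, byte
 * 1048576 and the reserve head at cycle 7, byte 5242880, head and tail
 * share a cycle, so the space left is 16777216 - (5242880 - 1048576) =
 * 12582912 bytes. Had the head wrapped to cycle 8, byte 524288, the space
 * left would instead be 1048576 - 524288 = 524288 bytes.
 */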
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 | |
| 1352 | |
Eric Sandeen | 0d5a75e | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1353 | static void |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1354 | xlog_ioend_work( |
| 1355 | struct work_struct *work) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | { |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1357 | struct xlog_in_core *iclog = |
| 1358 | container_of(work, struct xlog_in_core, ic_end_io_work); |
| 1359 | struct xlog *log = iclog->ic_log; |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1360 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 | |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1362 | error = blk_status_to_errno(iclog->ic_bio.bi_status); |
Christoph Hellwig | 366fc4b | 2019-06-28 19:27:21 -0700 | [diff] [blame] | 1363 | #ifdef DEBUG |
| 1364 | /* treat writes with injected CRC errors as failed */ |
| 1365 | if (iclog->ic_fail_crc) |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1366 | error = -EIO; |
Christoph Hellwig | 366fc4b | 2019-06-28 19:27:21 -0700 | [diff] [blame] | 1367 | #endif |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 1368 | |
Christoph Hellwig | 366fc4b | 2019-06-28 19:27:21 -0700 | [diff] [blame] | 1369 | /* |
| 1370 | * Race to shutdown the filesystem if we see an error. |
| 1371 | */ |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1372 | if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { |
| 1373 | xfs_alert(log->l_mp, "log I/O error %d", error); |
| 1374 | xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | } |
David Chinner | 3db296f | 2007-05-14 18:24:16 +1000 | [diff] [blame] | 1376 | |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 1377 | xlog_state_done_syncing(iclog); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1378 | bio_uninit(&iclog->ic_bio); |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1379 | |
David Chinner | 3db296f | 2007-05-14 18:24:16 +1000 | [diff] [blame] | 1380 | /* |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1381 | * Drop the lock to signal that we are done. Nothing references the |
| 1382 | * iclog after this, so an unmount waiting on this lock can now tear it |
| 1383 | * down safely. As such, it is unsafe to reference the iclog after the |
| 1384 | * unlock as we could race with it being freed. |
David Chinner | 3db296f | 2007-05-14 18:24:16 +1000 | [diff] [blame] | 1385 | */ |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1386 | up(&iclog->ic_sema); |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 1387 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | |
| 1389 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 | * Return size of each in-core log record buffer. |
| 1391 | * |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1392 | * All machines get 8 x 32kB buffers by default, unless tuned otherwise. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | * |
| 1394 | * If the filesystem blocksize is too large, we may need to choose a |
| 1395 | * larger size since the directory code currently logs entire blocks. |
| 1396 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1398 | xlog_get_iclog_buffer_size( |
| 1399 | struct xfs_mount *mp, |
| 1400 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | { |
Eric Sandeen | 1cb5125 | 2007-08-16 16:24:43 +1000 | [diff] [blame] | 1402 | if (mp->m_logbufs <= 0) |
Christoph Hellwig | 4f62282 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 1403 | mp->m_logbufs = XLOG_MAX_ICLOGS; |
| 1404 | if (mp->m_logbsize <= 0) |
| 1405 | mp->m_logbsize = XLOG_BIG_RECORD_BSIZE; |
| 1406 | |
| 1407 | log->l_iclog_bufs = mp->m_logbufs; |
| 1408 | log->l_iclog_size = mp->m_logbsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | |
| 1410 | /* |
Christoph Hellwig | 4f62282 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 1411 | * # headers = size / 32k - one header holds cycles from 32k of data. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | */ |
Christoph Hellwig | 4f62282 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 1413 | log->l_iclog_heads = |
| 1414 | DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE); |
| 1415 | log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; |
| 1416 | } |
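/*
 * For example (illustrative numbers): the default 32 kB log buffer size
 * gives l_iclog_heads = DIV_ROUND_UP(32768, 32768) = 1 and l_iclog_hsize =
 * 1 << BBSHIFT = 512 bytes, while mounting with "-o logbsize=256k" gives
 * l_iclog_heads = DIV_ROUND_UP(262144, 32768) = 8 and l_iclog_hsize =
 * 8 << BBSHIFT = 4096 bytes, because each header only carries the cycle
 * words for 32 kB of record data.
 */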
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 | |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1418 | void |
| 1419 | xfs_log_work_queue( |
| 1420 | struct xfs_mount *mp) |
| 1421 | { |
Brian Foster | 696a562 | 2017-03-28 14:51:44 -0700 | [diff] [blame] | 1422 | queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work, |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1423 | msecs_to_jiffies(xfs_syncd_centisecs * 10)); |
| 1424 | } |
| 1425 | |
| 1426 | /* |
Darrick J. Wong | 2b73a2c | 2021-08-08 08:27:12 -0700 | [diff] [blame] | 1427 | * Clear the log incompat flags if we have the opportunity. |
| 1428 | * |
| 1429 | * This only happens if we're about to log the second dummy transaction as part |
| 1430 | * of covering the log and we can get the log incompat feature usage lock. |
| 1431 | */ |
| 1432 | static inline void |
| 1433 | xlog_clear_incompat( |
| 1434 | struct xlog *log) |
| 1435 | { |
| 1436 | struct xfs_mount *mp = log->l_mp; |
| 1437 | |
| 1438 | if (!xfs_sb_has_incompat_log_feature(&mp->m_sb, |
| 1439 | XFS_SB_FEAT_INCOMPAT_LOG_ALL)) |
| 1440 | return; |
| 1441 | |
| 1442 | if (log->l_covered_state != XLOG_STATE_COVER_DONE2) |
| 1443 | return; |
| 1444 | |
| 1445 | if (!down_write_trylock(&log->l_incompat_users)) |
| 1446 | return; |
| 1447 | |
| 1448 | xfs_clear_incompat_log_features(mp); |
| 1449 | up_write(&log->l_incompat_users); |
| 1450 | } |
| 1451 | |
| 1452 | /* |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1453 | * Every sync period we need to unpin all items in the AIL and push them to |
| 1454 | * disk. If there is nothing dirty, then we might need to cover the log to |
| 1455 | * indicate that the filesystem is idle. |
| 1456 | */ |
Eric Sandeen | 0d5a75e | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1457 | static void |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1458 | xfs_log_worker( |
| 1459 | struct work_struct *work) |
| 1460 | { |
| 1461 | struct xlog *log = container_of(to_delayed_work(work), |
| 1462 | struct xlog, l_work); |
| 1463 | struct xfs_mount *mp = log->l_mp; |
| 1464 | |
| 1465 | /* dgc: errors ignored - not fatal and nowhere to report them */ |
Brian Foster | 37444fc | 2021-01-22 16:48:21 -0800 | [diff] [blame] | 1466 | if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) { |
Dave Chinner | 61e63ec | 2015-01-22 09:10:31 +1100 | [diff] [blame] | 1467 | /* |
| 1468 | * Dump a transaction into the log that contains no real change. |
| 1469 | * This is needed to stamp the current tail LSN into the log |
| 1470 | * during the covering operation. |
| 1471 | * |
| 1472 | * We cannot use an inode here for this - that will push dirty |
| 1473 | * state back up into the VFS and then periodic inode flushing |
| 1474 | * will prevent log covering from making progress. Hence we |
| 1475 | * synchronously log the superblock instead to ensure the |
| 1476 | * superblock is immediately unpinned and can be written back. |
| 1477 | */ |
Darrick J. Wong | 2b73a2c | 2021-08-08 08:27:12 -0700 | [diff] [blame] | 1478 | xlog_clear_incompat(log); |
Dave Chinner | 61e63ec | 2015-01-22 09:10:31 +1100 | [diff] [blame] | 1479 | xfs_sync_sb(mp, true); |
| 1480 | } else |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1481 | xfs_log_force(mp, 0); |
| 1482 | |
| 1483 | /* start pushing all the metadata that is currently dirty */ |
| 1484 | xfs_ail_push_all(mp->m_ail); |
| 1485 | |
| 1486 | /* queue us up again */ |
| 1487 | xfs_log_work_queue(mp); |
| 1488 | } |
| 1489 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1490 | /* |
| 1491 | * This routine initializes some of the log structure for a given mount point. |
| 1492 | * Its primary purpose is to fill in enough, so recovery can occur. However, |
| 1493 | * some other stuff may be filled in too. |
| 1494 | */ |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1495 | STATIC struct xlog * |
| 1496 | xlog_alloc_log( |
| 1497 | struct xfs_mount *mp, |
| 1498 | struct xfs_buftarg *log_target, |
| 1499 | xfs_daddr_t blk_offset, |
| 1500 | int num_bblks) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | { |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1502 | struct xlog *log; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | xlog_rec_header_t *head; |
| 1504 | xlog_in_core_t **iclogp; |
| 1505 | xlog_in_core_t *iclog, *prev_iclog=NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | int i; |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1507 | int error = -ENOMEM; |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1508 | uint log2_size = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1509 | |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1510 | log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1511 | if (!log) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1512 | xfs_warn(mp, "Log allocation failed: No memory!"); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1513 | goto out; |
| 1514 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | |
| 1516 | log->l_mp = mp; |
| 1517 | log->l_targ = log_target; |
| 1518 | log->l_logsize = BBTOB(num_bblks); |
| 1519 | log->l_logBBstart = blk_offset; |
| 1520 | log->l_logBBsize = num_bblks; |
| 1521 | log->l_covered_state = XLOG_STATE_COVER_IDLE; |
Dave Chinner | e1d06e5 | 2021-08-10 17:59:02 -0700 | [diff] [blame] | 1522 | set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1523 | INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1524 | |
| 1525 | log->l_prev_block = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1527 | xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); |
| 1528 | xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ |
Christoph Hellwig | c303c5b | 2012-02-20 02:31:26 +0000 | [diff] [blame] | 1530 | |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1531 | if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1) |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 1532 | log->l_iclog_roundoff = mp->m_sb.sb_logsunit; |
| 1533 | else |
| 1534 | log->l_iclog_roundoff = BBSIZE; |
| 1535 | |
Christoph Hellwig | c303c5b | 2012-02-20 02:31:26 +0000 | [diff] [blame] | 1536 | xlog_grant_head_init(&log->l_reserve_head); |
| 1537 | xlog_grant_head_init(&log->l_write_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1539 | error = -EFSCORRUPTED; |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1540 | if (xfs_has_sector(mp)) { |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1541 | log2_size = mp->m_sb.sb_logsectlog; |
| 1542 | if (log2_size < BBSHIFT) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1543 | xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", |
| 1544 | log2_size, BBSHIFT); |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1545 | goto out_free_log; |
| 1546 | } |
| 1547 | |
| 1548 | log2_size -= BBSHIFT; |
| 1549 | if (log2_size > mp->m_sectbb_log) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1550 | xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", |
| 1551 | log2_size, mp->m_sectbb_log); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1552 | goto out_free_log; |
| 1553 | } |
| 1554 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 | /* for larger sector sizes, must have v2 or external log */ |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1556 | if (log2_size && log->l_logBBstart > 0 && |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1557 | !xfs_has_logv2(mp)) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1558 | xfs_warn(mp, |
| 1559 | "log sector size (0x%x) invalid for configuration.", |
| 1560 | log2_size); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1561 | goto out_free_log; |
| 1562 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | } |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1564 | log->l_sectBBsize = 1 << log2_size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1565 | |
Darrick J. Wong | 2b73a2c | 2021-08-08 08:27:12 -0700 | [diff] [blame] | 1566 | init_rwsem(&log->l_incompat_users); |
| 1567 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1568 | xlog_get_iclog_buffer_size(mp, log); |
| 1569 | |
Eric Sandeen | 007c61c | 2007-10-11 17:43:56 +1000 | [diff] [blame] | 1570 | spin_lock_init(&log->l_icloglock); |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 1571 | init_waitqueue_head(&log->l_flush_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1573 | iclogp = &log->l_iclog; |
| 1574 | /* |
| 1575 | * The amount of memory to allocate for the iclog structure is |
| 1576 | * rather funky due to the way the structure is defined. It is |
| 1577 | * done this way so that we can use different sizes for machines |
| 1578 | * with different amounts of memory. See the definition of |
| 1579 | * xlog_in_core_t in xfs_log_priv.h for details. |
| 1580 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | ASSERT(log->l_iclog_size >= 4096); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1582 | for (i = 0; i < log->l_iclog_bufs; i++) { |
Christoph Hellwig | 89b171a | 2019-06-28 19:31:36 -0700 | [diff] [blame] | 1583 | size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * |
| 1584 | sizeof(struct bio_vec); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1585 | |
| 1586 | iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL); |
| 1587 | if (!iclog) |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1588 | goto out_free_iclog; |
| 1589 | |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1590 | *iclogp = iclog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | iclog->ic_prev = prev_iclog; |
| 1592 | prev_iclog = iclog; |
Christoph Hellwig | 1fa40b0 | 2007-05-14 18:23:50 +1000 | [diff] [blame] | 1593 | |
Dave Chinner | d634525 | 2021-08-09 10:10:01 -0700 | [diff] [blame] | 1594 | iclog->ic_data = kvzalloc(log->l_iclog_size, |
| 1595 | GFP_KERNEL | __GFP_RETRY_MAYFAIL); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1596 | if (!iclog->ic_data) |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1597 | goto out_free_iclog; |
David Chinner | 4679b2d | 2008-04-10 12:18:54 +1000 | [diff] [blame] | 1598 | #ifdef DEBUG |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 1599 | log->l_iclog_bak[i] = &iclog->ic_header; |
David Chinner | 4679b2d | 2008-04-10 12:18:54 +1000 | [diff] [blame] | 1600 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | head = &iclog->ic_header; |
| 1602 | memset(head, 0, sizeof(xlog_rec_header_t)); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1603 | head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
| 1604 | head->h_version = cpu_to_be32( |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1605 | xfs_has_logv2(log->l_mp) ? 2 : 1); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1606 | head->h_size = cpu_to_be32(log->l_iclog_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1607 | /* new fields */ |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1608 | head->h_fmt = cpu_to_be32(XLOG_FMT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); |
| 1610 | |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1611 | iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 | iclog->ic_state = XLOG_STATE_ACTIVE; |
| 1613 | iclog->ic_log = log; |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 1614 | atomic_set(&iclog->ic_refcnt, 0); |
Christoph Hellwig | 89ae379 | 2019-06-28 19:27:34 -0700 | [diff] [blame] | 1615 | INIT_LIST_HEAD(&iclog->ic_callbacks); |
Christoph Hellwig | b28708d | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 1616 | iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 1618 | init_waitqueue_head(&iclog->ic_force_wait); |
| 1619 | init_waitqueue_head(&iclog->ic_write_wait); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1620 | INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work); |
| 1621 | sema_init(&iclog->ic_sema, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | |
| 1623 | iclogp = &iclog->ic_next; |
| 1624 | } |
| 1625 | *iclogp = log->l_iclog; /* complete ring */ |
| 1626 | log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ |
| 1627 | |
Christoph Hellwig | 1058d0f | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1628 | log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", |
Darrick J. Wong | 05a302a | 2021-01-22 16:48:42 -0800 | [diff] [blame] | 1629 | XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | |
| 1630 | WQ_HIGHPRI), |
| 1631 | 0, mp->m_super->s_id); |
Christoph Hellwig | 1058d0f | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1632 | if (!log->l_ioend_workqueue) |
| 1633 | goto out_free_iclog; |
| 1634 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1635 | error = xlog_cil_init(log); |
| 1636 | if (error) |
Christoph Hellwig | 1058d0f | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1637 | goto out_destroy_workqueue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | return log; |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1639 | |
Christoph Hellwig | 1058d0f | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1640 | out_destroy_workqueue: |
| 1641 | destroy_workqueue(log->l_ioend_workqueue); |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1642 | out_free_iclog: |
| 1643 | for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { |
| 1644 | prev_iclog = iclog->ic_next; |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1645 | kmem_free(iclog->ic_data); |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1646 | kmem_free(iclog); |
Brian Foster | 798a9ca | 2019-12-03 07:53:15 -0800 | [diff] [blame] | 1647 | if (prev_iclog == log->l_iclog) |
| 1648 | break; |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1649 | } |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1650 | out_free_log: |
| 1651 | kmem_free(log); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1652 | out: |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1653 | return ERR_PTR(error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1654 | } /* xlog_alloc_log */ |
| 1655 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1656 | /* |
Darrick J. Wong | ed1575d | 2020-09-25 17:39:51 -0700 | [diff] [blame] | 1657 | * Compute the LSN that we'd need to push the log tail towards in order to have |
| 1658 | * (a) enough on-disk log space to log the number of bytes specified, (b) at |
| 1659 | * least 25% of the log space free, and (c) at least 256 blocks free. If the |
| 1660 | * log free space already meets all three thresholds, this function returns |
| 1661 | * NULLCOMMITLSN. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 | */ |
Darrick J. Wong | ed1575d | 2020-09-25 17:39:51 -0700 | [diff] [blame] | 1663 | xfs_lsn_t |
| 1664 | xlog_grant_push_threshold( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1665 | struct xlog *log, |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1666 | int need_bytes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | { |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1668 | xfs_lsn_t threshold_lsn = 0; |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1669 | xfs_lsn_t last_sync_lsn; |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1670 | int free_blocks; |
| 1671 | int free_bytes; |
| 1672 | int threshold_block; |
| 1673 | int threshold_cycle; |
| 1674 | int free_threshold; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1676 | ASSERT(BTOBB(need_bytes) < log->l_logBBsize); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1678 | free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1679 | free_blocks = BTOBBT(free_bytes); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1680 | |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1681 | /* |
| 1682 | * Set the threshold for the minimum number of free blocks in the |
| 1683 | * log to the maximum of what the caller needs, one quarter of the |
| 1684 | * log, and 256 blocks. |
| 1685 | */ |
| 1686 | free_threshold = BTOBB(need_bytes); |
Dave Chinner | 9bb54cb | 2018-06-07 07:54:02 -0700 | [diff] [blame] | 1687 | free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); |
| 1688 | free_threshold = max(free_threshold, 256); |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1689 | if (free_blocks >= free_threshold) |
Darrick J. Wong | ed1575d | 2020-09-25 17:39:51 -0700 | [diff] [blame] | 1690 | return NULLCOMMITLSN; |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1691 | |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1692 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, |
| 1693 | &threshold_block); |
| 1694 | threshold_block += free_threshold; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | if (threshold_block >= log->l_logBBsize) { |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1696 | threshold_block -= log->l_logBBsize; |
| 1697 | threshold_cycle += 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | } |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1699 | threshold_lsn = xlog_assign_lsn(threshold_cycle, |
| 1700 | threshold_block); |
| 1701 | /* |
| 1702 | * Don't pass in an lsn greater than the lsn of the last |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1703 | * log record known to be on disk. Use a snapshot of the last sync lsn |
| 1704 | * so that it doesn't change between the compare and the set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | */ |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1706 | last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); |
| 1707 | if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0) |
| 1708 | threshold_lsn = last_sync_lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | |
Darrick J. Wong | ed1575d | 2020-09-25 17:39:51 -0700 | [diff] [blame] | 1710 | return threshold_lsn; |
| 1711 | } |
| 1712 | |
| 1713 | /* |
| 1714 | * Push the tail of the log if we need to do so to maintain the free log space |
| 1715 | * thresholds set out by xlog_grant_push_threshold. We may need to adopt a |
| 1716 | * policy which pushes on an lsn which is further along in the log once we |
| 1717 | * reach the high water mark. In this manner, we would be creating a low water |
| 1718 | * mark. |
| 1719 | */ |
| 1720 | STATIC void |
| 1721 | xlog_grant_push_ail( |
| 1722 | struct xlog *log, |
| 1723 | int need_bytes) |
| 1724 | { |
| 1725 | xfs_lsn_t threshold_lsn; |
| 1726 | |
| 1727 | threshold_lsn = xlog_grant_push_threshold(log, need_bytes); |
Dave Chinner | 2039a27 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 1728 | if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log)) |
Darrick J. Wong | ed1575d | 2020-09-25 17:39:51 -0700 | [diff] [blame] | 1729 | return; |
| 1730 | |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1731 | /* |
| 1732 | * Get the transaction layer to kick the dirty buffers out to |
| 1733 | * disk asynchronously. No point in trying to do this if |
| 1734 | * the filesystem is shutting down. |
| 1735 | */ |
Darrick J. Wong | ed1575d | 2020-09-25 17:39:51 -0700 | [diff] [blame] | 1736 | xfs_ail_push(log->l_ailp, threshold_lsn); |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1737 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | |
Christoph Hellwig | 873ff550 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1739 | /* |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1740 | * Stamp cycle number in every block |
| 1741 | */ |
| 1742 | STATIC void |
| 1743 | xlog_pack_data( |
| 1744 | struct xlog *log, |
| 1745 | struct xlog_in_core *iclog, |
| 1746 | int roundoff) |
| 1747 | { |
| 1748 | int i, j, k; |
| 1749 | int size = iclog->ic_offset + roundoff; |
| 1750 | __be32 cycle_lsn; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 1751 | char *dp; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1752 | |
| 1753 | cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); |
| 1754 | |
| 1755 | dp = iclog->ic_datap; |
| 1756 | for (i = 0; i < BTOBB(size); i++) { |
| 1757 | if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) |
| 1758 | break; |
| 1759 | iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; |
| 1760 | *(__be32 *)dp = cycle_lsn; |
| 1761 | dp += BBSIZE; |
| 1762 | } |
| 1763 | |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1764 | if (xfs_has_logv2(log->l_mp)) { |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1765 | xlog_in_core_2_t *xhdr = iclog->ic_data; |
| 1766 | |
| 1767 | for ( ; i < BTOBB(size); i++) { |
| 1768 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 1769 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 1770 | xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; |
| 1771 | *(__be32 *)dp = cycle_lsn; |
| 1772 | dp += BBSIZE; |
| 1773 | } |
| 1774 | |
| 1775 | for (i = 1; i < log->l_iclog_heads; i++) |
| 1776 | xhdr[i].hic_xheader.xh_cycle = cycle_lsn; |
| 1777 | } |
| 1778 | } |
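/*
 * Log recovery performs the inverse of the stamping above, copying the
 * saved words from the header(s) back into the first four bytes of each
 * basic block. A condensed sketch of that inverse, covering the v1 case
 * only (xlog_unpack_data() in xfs_log_recover.c is the authoritative
 * version):
 */
static inline void
xlog_unpack_data_sketch(
	struct xlog_rec_header	*rhead,
	char			*dp)
{
	int			i;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		    i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		/* restore the data word that the cycle number displaced */
		*(__be32 *)dp = rhead->h_cycle_data[i];
		dp += BBSIZE;
	}
	/* v2 logs continue through the extended headers, mirroring above */
}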
| 1779 | |
| 1780 | /* |
| 1781 | * Calculate the checksum for a log buffer. |
| 1782 | * |
| 1783 | * This is a little more complicated than it should be because the various |
| 1784 | * headers and the actual data are non-contiguous. |
| 1785 | */ |
Dave Chinner | f9668a0 | 2012-11-28 13:01:03 +1100 | [diff] [blame] | 1786 | __le32 |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1787 | xlog_cksum( |
| 1788 | struct xlog *log, |
| 1789 | struct xlog_rec_header *rhead, |
| 1790 | char *dp, |
| 1791 | int size) |
| 1792 | { |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 1793 | uint32_t crc; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1794 | |
| 1795 | /* first generate the crc for the record header ... */ |
Dave Chinner | cae028d | 2016-12-05 14:40:32 +1100 | [diff] [blame] | 1796 | crc = xfs_start_cksum_update((char *)rhead, |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1797 | sizeof(struct xlog_rec_header), |
| 1798 | offsetof(struct xlog_rec_header, h_crc)); |
| 1799 | |
| 1800 | /* ... then for additional cycle data for v2 logs ... */ |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1801 | if (xfs_has_logv2(log->l_mp)) { |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1802 | union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead; |
| 1803 | int i; |
Brian Foster | a3f2001 | 2015-08-19 09:59:50 +1000 | [diff] [blame] | 1804 | int xheads; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1805 | |
Gao Xiang | 0c771b9 | 2020-09-22 09:41:06 -0700 | [diff] [blame] | 1806 | xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE); |
Brian Foster | a3f2001 | 2015-08-19 09:59:50 +1000 | [diff] [blame] | 1807 | |
| 1808 | for (i = 1; i < xheads; i++) { |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1809 | crc = crc32c(crc, &xhdr[i].hic_xheader, |
| 1810 | sizeof(struct xlog_rec_ext_header)); |
| 1811 | } |
| 1812 | } |
| 1813 | |
| 1814 | /* ... and finally for the payload */ |
| 1815 | crc = crc32c(crc, dp, size); |
| 1816 | |
| 1817 | return xfs_end_cksum(crc); |
| 1818 | } |
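/*
 * Hedged sketch of how a consumer might validate a record checksum:
 * recompute the CRC with the h_crc field itself excluded, as the offsetof()
 * trick above does, and compare against the stored value. model_crc32c() is
 * a minimal bitwise CRC32c stand-in for whatever implementation the
 * environment provides; seeding and byte order are simplified relative to
 * the kernel helpers.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t
model_crc32c(uint32_t crc, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	int i;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82f63b78 & (0U - (crc & 1)));
	}
	return ~crc;
}

static int
model_verify_record(const unsigned char *rec, size_t len, size_t crc_off,
		uint32_t stored_crc)
{
	/* checksum everything except the CRC field itself */
	uint32_t crc = model_crc32c(0, rec, crc_off);

	crc = model_crc32c(crc, rec + crc_off + sizeof(uint32_t),
			len - crc_off - sizeof(uint32_t));
	return crc == stored_crc;
}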
| 1819 | |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1820 | static void |
| 1821 | xlog_bio_end_io( |
| 1822 | struct bio *bio) |
| 1823 | { |
| 1824 | struct xlog_in_core *iclog = bio->bi_private; |
| 1825 | |
Christoph Hellwig | 1058d0f | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1826 | queue_work(iclog->ic_log->l_ioend_workqueue, |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1827 | &iclog->ic_end_io_work); |
| 1828 | } |
| 1829 | |
Brian Foster | 842a42d | 2020-03-25 09:17:13 -0700 | [diff] [blame] | 1830 | static int |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1831 | xlog_map_iclog_data( |
| 1832 | struct bio *bio, |
| 1833 | void *data, |
| 1834 | size_t count) |
| 1835 | { |
| 1836 | do { |
| 1837 | struct page *page = kmem_to_page(data); |
| 1838 | unsigned int off = offset_in_page(data); |
| 1839 | size_t len = min_t(size_t, count, PAGE_SIZE - off); |
| 1840 | |
Brian Foster | 842a42d | 2020-03-25 09:17:13 -0700 | [diff] [blame] | 1841 | if (bio_add_page(bio, page, len, off) != len) |
| 1842 | return -EIO; |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1843 | |
| 1844 | data += len; |
| 1845 | count -= len; |
| 1846 | } while (count); |
Brian Foster | 842a42d | 2020-03-25 09:17:13 -0700 | [diff] [blame] | 1847 | |
| 1848 | return 0; |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1849 | } |
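/*
 * Userspace model of the page-walk above: carve an arbitrary virtual range
 * into page-sized pieces, honouring the offset of the first byte within its
 * page, and bail out like the -EIO path does if a chunk cannot be emitted.
 * A 4096 byte page is assumed for illustration.
 */
#include <stddef.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE	4096u

static int
model_walk_pages(char *data, size_t count,
		int (*emit)(char *chunk, size_t len))
{
	do {
		size_t off = (uintptr_t)data & (MODEL_PAGE_SIZE - 1);
		size_t len = count < MODEL_PAGE_SIZE - off ?
				count : MODEL_PAGE_SIZE - off;

		if (emit(data, len))
			return -1;	/* mirrors the -EIO bail-out */
		data += len;
		count -= len;
	} while (count);

	return 0;
}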
| 1850 | |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1851 | STATIC void |
| 1852 | xlog_write_iclog( |
| 1853 | struct xlog *log, |
| 1854 | struct xlog_in_core *iclog, |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1855 | uint64_t bno, |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 1856 | unsigned int count) |
Christoph Hellwig | 873ff550 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1857 | { |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1858 | ASSERT(bno < log->l_logBBsize); |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 1859 | trace_xlog_iclog_write(iclog, _RET_IP_); |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1860 | |
| 1861 | /* |
| 1862 | * We lock the iclogbufs here so that we can serialise against I/O |
| 1863 | * completion during unmount. We might be processing a shutdown |
| 1864 | * triggered during unmount, and that can occur asynchronously to the |
| 1865 | * unmount thread, and hence we need to ensure that it completes before |
| 1866 | * tearing down the iclogbufs. Hence we need to hold the buffer lock |
| 1867 | * across the log IO to achieve that. |
| 1868 | */ |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1869 | down(&iclog->ic_sema); |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 1870 | if (xlog_is_shutdown(log)) { |
Christoph Hellwig | 873ff550 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1871 | /* |
| 1872 | * It would seem logical to return EIO here, but we rely on |
| 1873 | * the log state machine to propagate I/O errors instead of |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1874 | * doing it here. We kick off the state machine and unlock |
| 1875 | * the buffer manually; the code needs to be kept in sync |
| 1876 | * with the I/O completion path. |
Christoph Hellwig | 873ff550 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1877 | */ |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 1878 | xlog_state_done_syncing(iclog); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1879 | up(&iclog->ic_sema); |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1880 | return; |
Christoph Hellwig | 873ff550 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1881 | } |
| 1882 | |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1883 | bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE)); |
| 1884 | bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); |
| 1885 | iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; |
| 1886 | iclog->ic_bio.bi_end_io = xlog_bio_end_io; |
| 1887 | iclog->ic_bio.bi_private = iclog; |
Dave Chinner | 2def284 | 2020-03-24 20:10:27 -0700 | [diff] [blame] | 1888 | |
| 1889 | /* |
| 1890 | * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more |
| 1891 | * IOs coming immediately after this one. This prevents the block layer |
| 1892 | * writeback throttle from throttling log writes behind background |
| 1893 | * metadata writeback and causing priority inversions. |
| 1894 | */ |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 1895 | iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE; |
Dave Chinner | b5d721e | 2021-07-27 16:23:47 -0700 | [diff] [blame] | 1896 | if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1897 | iclog->ic_bio.bi_opf |= REQ_PREFLUSH; |
Dave Chinner | b5d721e | 2021-07-27 16:23:47 -0700 | [diff] [blame] | 1898 | /* |
| 1899 | * For external log devices, we also need to flush the data |
| 1900 | * device cache first to ensure all metadata writeback covered |
| 1901 | * by the LSN in this iclog is on stable storage. This is slow, |
| 1902 | * but it *must* complete before we issue the external log IO. |
| 1903 | */ |
| 1904 | if (log->l_targ != log->l_mp->m_ddev_targp) |
| 1905 | blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev); |
| 1906 | } |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 1907 | if (iclog->ic_flags & XLOG_ICL_NEED_FUA) |
| 1908 | iclog->ic_bio.bi_opf |= REQ_FUA; |
Dave Chinner | b5d721e | 2021-07-27 16:23:47 -0700 | [diff] [blame] | 1909 | |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 1910 | iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1911 | |
Brian Foster | 842a42d | 2020-03-25 09:17:13 -0700 | [diff] [blame] | 1912 | if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { |
| 1913 | xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); |
| 1914 | return; |
| 1915 | } |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1916 | if (is_vmalloc_addr(iclog->ic_data)) |
Christoph Hellwig | 2c68a1d | 2019-10-14 10:36:40 -0700 | [diff] [blame] | 1917 | flush_kernel_vmap_range(iclog->ic_data, count); |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1918 | |
| 1919 | /* |
| 1920 | * If this log buffer would straddle the end of the log we will have |
| 1921 | * to split it up into two bios, so that we can continue at the start. |
| 1922 | */ |
| 1923 | if (bno + BTOBB(count) > log->l_logBBsize) { |
| 1924 | struct bio *split; |
| 1925 | |
| 1926 | split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, |
| 1927 | GFP_NOIO, &fs_bio_set); |
| 1928 | bio_chain(split, &iclog->ic_bio); |
| 1929 | submit_bio(split); |
| 1930 | |
| 1931 | /* restart at logical offset zero for the remainder */ |
| 1932 | iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; |
| 1933 | } |
| 1934 | |
| 1935 | submit_bio(&iclog->ic_bio); |
Christoph Hellwig | 873ff550 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1936 | } |
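/*
 * Condensed model of the bio flag selection above. The bit values are local
 * stand-ins, not the block layer's REQ_* encoding; the point is the policy:
 * every log write is REQ_META | REQ_SYNC | REQ_IDLE, a pre-flush is added
 * only when prior writes must reach stable media first, and FUA is added
 * only when this write itself must.
 */
#include <stdbool.h>
#include <stdint.h>

#define M_REQ_SYNC	(1u << 0)
#define M_REQ_IDLE	(1u << 1)
#define M_REQ_META	(1u << 2)
#define M_REQ_PREFLUSH	(1u << 3)
#define M_REQ_FUA	(1u << 4)

static uint32_t
model_iclog_opf(bool need_flush, bool need_fua)
{
	uint32_t opf = M_REQ_META | M_REQ_SYNC | M_REQ_IDLE;

	if (need_flush)
		opf |= M_REQ_PREFLUSH;	/* order against earlier writes */
	if (need_fua)
		opf |= M_REQ_FUA;	/* this write itself hits stable media */
	return opf;
}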
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1937 | |
| 1938 | /* |
Christoph Hellwig | 5693384 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1939 | * We need to bump cycle number for the part of the iclog that is |
| 1940 | * written to the start of the log. Watch out for the header magic |
| 1941 | * number case, though. |
| 1942 | */ |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 1943 | static void |
Christoph Hellwig | 5693384 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1944 | xlog_split_iclog( |
| 1945 | struct xlog *log, |
| 1946 | void *data, |
| 1947 | uint64_t bno, |
| 1948 | unsigned int count) |
| 1949 | { |
| 1950 | unsigned int split_offset = BBTOB(log->l_logBBsize - bno); |
| 1951 | unsigned int i; |
| 1952 | |
| 1953 | for (i = split_offset; i < count; i += BBSIZE) { |
| 1954 | uint32_t cycle = get_unaligned_be32(data + i); |
| 1955 | |
| 1956 | if (++cycle == XLOG_HEADER_MAGIC_NUM) |
| 1957 | cycle++; |
| 1958 | put_unaligned_be32(cycle, data + i); |
| 1959 | } |
Christoph Hellwig | 5693384 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1960 | } |
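/*
 * Tiny model of the cycle bump above: blocks that wrap to the physical start
 * of the log belong to the next cycle, and the stamped value must skip the
 * header magic number so recovery can still tell headers from data. The
 * constant mirrors XLOG_HEADER_MAGIC_NUM for illustration.
 */
#include <stdint.h>

#define MODEL_HEADER_MAGIC	0xFEEDbabeU

static uint32_t
model_bump_cycle(uint32_t cycle)
{
	if (++cycle == MODEL_HEADER_MAGIC)
		cycle++;
	return cycle;
}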
| 1961 | |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 1962 | static int |
| 1963 | xlog_calc_iclog_size( |
| 1964 | struct xlog *log, |
| 1965 | struct xlog_in_core *iclog, |
| 1966 | uint32_t *roundoff) |
| 1967 | { |
| 1968 | uint32_t count_init, count; |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 1969 | |
| 1970 | /* Add for LR header */ |
| 1971 | count_init = log->l_iclog_hsize + iclog->ic_offset; |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 1972 | count = roundup(count_init, log->l_iclog_roundoff); |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 1973 | |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 1974 | *roundoff = count - count_init; |
| 1975 | |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 1976 | ASSERT(count >= count_init); |
| 1977 | ASSERT(*roundoff < log->l_iclog_roundoff); |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 1978 | return count; |
| 1979 | } |
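/*
 * Worked example of the calculation above with made-up but plausible
 * numbers: a 512 byte LR header, 7000 bytes of iclog data and a 4096 byte
 * stripe-unit roundoff.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
model_roundup(uint32_t x, uint32_t to)
{
	return ((x + to - 1) / to) * to;
}

static void
model_iclog_size_example(void)
{
	uint32_t count_init = 512 + 7000;		/* header + data */
	uint32_t count = model_roundup(count_init, 4096);

	assert(count == 8192);				/* write size */
	assert(count - count_init == 680);		/* roundoff */
}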
| 1980 | |
Christoph Hellwig | 5693384 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 1981 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1982 | * Flush out the in-core log (iclog) to the on-disk log in an asynchronous |
| 1983 | * fashion. By this point the caller should have moved the current iclog |
| 1984 | * ptr in the log to point to the next available iclog. This allows further |
| 1985 | * write to continue while this code syncs out an iclog ready to go. |
| 1986 | * Before an in-core log can be written out, the data section must be scanned |
| 1987 | * to save away the 1st word of each BBSIZE block into the header. We replace |
| 1988 | * it with the current cycle count. Each BBSIZE block is tagged with the |
| 1989 | * cycle count because there is an implicit assumption that drives will |
| 1990 | * guarantee that entire 512 byte blocks get written at once. In other words, |
| 1991 | * we can't have part of a 512 byte block written and part not written. By |
| 1992 | * tagging each block, we will know which blocks are valid when recovering |
| 1993 | * after an unclean shutdown. |
| 1994 | * |
| 1995 | * This routine is single threaded on the iclog. No other thread can be in |
| 1996 | * this routine with the same iclog. Changing contents of iclog can |
| 1997 | * therefore be done without grabbing the state machine lock. Updating the global |
| 1998 | * log will require grabbing the lock though. |
| 1999 | * |
| 2000 | * The entire log manager uses a logical block numbering scheme. Only |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 2001 | * xlog_write_iclog knows about the fact that the log may not start with |
| 2002 | * block zero on a given device. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2003 | */ |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 2004 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2005 | xlog_sync( |
| 2006 | struct xlog *log, |
| 2007 | struct xlog_in_core *iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2008 | { |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 2009 | unsigned int count; /* byte count of bwrite */ |
| 2010 | unsigned int roundoff; /* roundoff to BB or stripe */ |
| 2011 | uint64_t bno; |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 2012 | unsigned int size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2013 | |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 2014 | ASSERT(atomic_read(&iclog->ic_refcnt) == 0); |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 2015 | trace_xlog_iclog_sync(iclog, _RET_IP_); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2016 | |
Christoph Hellwig | db0a6fa | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 2017 | count = xlog_calc_iclog_size(log, iclog, &roundoff); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | |
| 2019 | /* move grant heads by roundoff in sync */ |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 2020 | xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); |
| 2021 | xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2022 | |
| 2023 | /* put cycle number in every block */ |
| 2024 | xlog_pack_data(log, iclog, roundoff); |
| 2025 | |
| 2026 | /* real byte length */ |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 2027 | size = iclog->ic_offset; |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 2028 | if (xfs_has_logv2(log->l_mp)) |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 2029 | size += roundoff; |
| 2030 | iclog->ic_header.h_len = cpu_to_be32(size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2031 | |
Christoph Hellwig | 9b0489c | 2019-06-28 19:27:23 -0700 | [diff] [blame] | 2032 | XFS_STATS_INC(log->l_mp, xs_log_writes); |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 2033 | XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2034 | |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 2035 | bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); |
| 2036 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 | /* Do we need to split this write into 2 parts? */ |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 2038 | if (bno + BTOBB(count) > log->l_logBBsize) |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 2039 | xlog_split_iclog(log, &iclog->ic_header, bno, count); |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 2040 | |
| 2041 | /* calculate the checksum */ |
| 2042 | iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, |
| 2043 | iclog->ic_datap, size); |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 2044 | /* |
| 2045 | * Intentionally corrupt the log record CRC based on the error injection |
| 2046 | * frequency, if defined. This facilitates testing log recovery in the |
| 2047 | * event of torn writes. Hence, set the IOABORT state to abort the log |
| 2048 | * write on I/O completion and shutdown the fs. The subsequent mount |
| 2049 | * detects the bad CRC and attempts to recover. |
| 2050 | */ |
Christoph Hellwig | 366fc4b | 2019-06-28 19:27:21 -0700 | [diff] [blame] | 2051 | #ifdef DEBUG |
Brian Foster | 3e88a00 | 2017-06-27 09:52:32 -0700 | [diff] [blame] | 2052 | if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { |
Christoph Hellwig | e2a6419 | 2017-04-21 11:24:40 -0700 | [diff] [blame] | 2053 | iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); |
Christoph Hellwig | 366fc4b | 2019-06-28 19:27:21 -0700 | [diff] [blame] | 2054 | iclog->ic_fail_crc = true; |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 2055 | xfs_warn(log->l_mp, |
| 2056 | "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", |
| 2057 | be64_to_cpu(iclog->ic_header.h_lsn)); |
| 2058 | } |
Christoph Hellwig | 366fc4b | 2019-06-28 19:27:21 -0700 | [diff] [blame] | 2059 | #endif |
Christoph Hellwig | abca1f3 | 2019-06-28 19:27:24 -0700 | [diff] [blame] | 2060 | xlog_verify_iclog(log, iclog, count); |
Dave Chinner | eef983f | 2021-06-18 08:21:51 -0700 | [diff] [blame] | 2061 | xlog_write_iclog(log, iclog, bno, count); |
Christoph Hellwig | 94860a3 | 2019-06-28 19:27:22 -0700 | [diff] [blame] | 2062 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2064 | /* |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 2065 | * Deallocate a log structure |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 2067 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2068 | xlog_dealloc_log( |
| 2069 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2070 | { |
| 2071 | xlog_in_core_t *iclog, *next_iclog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2072 | int i; |
| 2073 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 2074 | xlog_cil_destroy(log); |
| 2075 | |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 2076 | /* |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 2077 | * Cycle all the iclogbuf locks to make sure all log IO completion |
| 2078 | * is done before we tear down these buffers. |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 2079 | */ |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 2080 | iclog = log->l_iclog; |
| 2081 | for (i = 0; i < log->l_iclog_bufs; i++) { |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 2082 | down(&iclog->ic_sema); |
| 2083 | up(&iclog->ic_sema); |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 2084 | iclog = iclog->ic_next; |
| 2085 | } |
| 2086 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2087 | iclog = log->l_iclog; |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 2088 | for (i = 0; i < log->l_iclog_bufs; i++) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2089 | next_iclog = iclog->ic_next; |
Christoph Hellwig | 79b54d9 | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 2090 | kmem_free(iclog->ic_data); |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 2091 | kmem_free(iclog); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2092 | iclog = next_iclog; |
| 2093 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2095 | log->l_mp->m_log = NULL; |
Christoph Hellwig | 1058d0f | 2019-06-28 19:27:25 -0700 | [diff] [blame] | 2096 | destroy_workqueue(log->l_ioend_workqueue); |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 2097 | kmem_free(log); |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 2098 | } |
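/*
 * The down()/up() pairs above are the classic "cycle the lock" idiom:
 * acquiring and immediately releasing each buffer semaphore proves that any
 * I/O completion which held it has finished. A hedged userspace analogue
 * with a pthread mutex:
 */
#include <pthread.h>

static void
model_drain(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);	/* blocks until any holder is done */
	pthread_mutex_unlock(lock);	/* we never needed the lock itself */
}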
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 | |
| 2100 | /* |
| 2101 | * Update counters now that the memcpy is done; caller holds the icloglock. |
| 2102 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2103 | static inline void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2104 | xlog_state_finish_copy( |
| 2105 | struct xlog *log, |
| 2106 | struct xlog_in_core *iclog, |
| 2107 | int record_cnt, |
| 2108 | int copy_bytes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2109 | { |
Christoph Hellwig | 390aab0 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2110 | lockdep_assert_held(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2111 | |
Marcin Slusarz | 413d57c | 2008-02-13 15:03:29 -0800 | [diff] [blame] | 2112 | be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2113 | iclog->ic_offset += copy_bytes; |
Christoph Hellwig | 390aab0 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2114 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2115 | |
| 2116 | /* |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2117 | * Print out info relating to regions written that consume |
| 2118 | * the reservation |
| 2119 | */ |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 2120 | void |
| 2121 | xlog_print_tic_res( |
| 2122 | struct xfs_mount *mp, |
| 2123 | struct xlog_ticket *ticket) |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2124 | { |
| 2125 | uint i; |
| 2126 | uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); |
| 2127 | |
| 2128 | /* match with XLOG_REG_TYPE_* in xfs_log.h */ |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2129 | #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str |
Darrick J. Wong | d31d718 | 2019-05-23 08:45:21 -0700 | [diff] [blame] | 2130 | static char *res_type_str[] = { |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2131 | REG_TYPE_STR(BFORMAT, "bformat"), |
| 2132 | REG_TYPE_STR(BCHUNK, "bchunk"), |
| 2133 | REG_TYPE_STR(EFI_FORMAT, "efi_format"), |
| 2134 | REG_TYPE_STR(EFD_FORMAT, "efd_format"), |
| 2135 | REG_TYPE_STR(IFORMAT, "iformat"), |
| 2136 | REG_TYPE_STR(ICORE, "icore"), |
| 2137 | REG_TYPE_STR(IEXT, "iext"), |
| 2138 | REG_TYPE_STR(IBROOT, "ibroot"), |
| 2139 | REG_TYPE_STR(ILOCAL, "ilocal"), |
| 2140 | REG_TYPE_STR(IATTR_EXT, "iattr_ext"), |
| 2141 | REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), |
| 2142 | REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), |
| 2143 | REG_TYPE_STR(QFORMAT, "qformat"), |
| 2144 | REG_TYPE_STR(DQUOT, "dquot"), |
| 2145 | REG_TYPE_STR(QUOTAOFF, "quotaoff"), |
| 2146 | REG_TYPE_STR(LRHEADER, "LR header"), |
| 2147 | REG_TYPE_STR(UNMOUNT, "unmount"), |
| 2148 | REG_TYPE_STR(COMMIT, "commit"), |
| 2149 | REG_TYPE_STR(TRANSHDR, "trans header"), |
Darrick J. Wong | d31d718 | 2019-05-23 08:45:21 -0700 | [diff] [blame] | 2150 | REG_TYPE_STR(ICREATE, "inode create"), |
| 2151 | REG_TYPE_STR(RUI_FORMAT, "rui_format"), |
| 2152 | REG_TYPE_STR(RUD_FORMAT, "rud_format"), |
| 2153 | REG_TYPE_STR(CUI_FORMAT, "cui_format"), |
| 2154 | REG_TYPE_STR(CUD_FORMAT, "cud_format"), |
| 2155 | REG_TYPE_STR(BUI_FORMAT, "bui_format"), |
| 2156 | REG_TYPE_STR(BUD_FORMAT, "bud_format"), |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2157 | }; |
Darrick J. Wong | d31d718 | 2019-05-23 08:45:21 -0700 | [diff] [blame] | 2158 | BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1); |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2159 | #undef REG_TYPE_STR |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2160 | |
Brian Foster | 7d2d5653 | 2017-06-14 21:29:48 -0700 | [diff] [blame] | 2161 | xfs_warn(mp, "ticket reservation summary:"); |
Joe Perches | f41febd | 2015-07-29 11:52:04 +1000 | [diff] [blame] | 2162 | xfs_warn(mp, " unit res = %d bytes", |
| 2163 | ticket->t_unit_res); |
| 2164 | xfs_warn(mp, " current res = %d bytes", |
| 2165 | ticket->t_curr_res); |
| 2166 | xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", |
| 2167 | ticket->t_res_arr_sum, ticket->t_res_o_flow); |
| 2168 | xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", |
| 2169 | ticket->t_res_num_ophdrs, ophdr_spc); |
| 2170 | xfs_warn(mp, " ophdr + reg = %u bytes", |
| 2171 | ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); |
| 2172 | xfs_warn(mp, " num regions = %u", |
| 2173 | ticket->t_res_num); |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2174 | |
| 2175 | for (i = 0; i < ticket->t_res_num; i++) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2176 | uint r_type = ticket->t_res_arr[i].r_type; |
Eric Sandeen | 08e96e1 | 2013-10-11 20:59:05 -0500 | [diff] [blame] | 2177 | xfs_warn(mp, "region[%u]: %s - %u bytes", i, |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2178 | ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2179 | "bad-rtype" : res_type_str[r_type]), |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2180 | ticket->t_res_arr[i].r_len); |
| 2181 | } |
| 2182 | } |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2183 | |
| 2184 | /* |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2185 | * Print a summary of the transaction. |
| 2186 | */ |
| 2187 | void |
| 2188 | xlog_print_trans( |
Dave Chinner | e6631f8 | 2018-05-09 07:49:37 -0700 | [diff] [blame] | 2189 | struct xfs_trans *tp) |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2190 | { |
Dave Chinner | e6631f8 | 2018-05-09 07:49:37 -0700 | [diff] [blame] | 2191 | struct xfs_mount *mp = tp->t_mountp; |
| 2192 | struct xfs_log_item *lip; |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2193 | |
| 2194 | /* dump core transaction and ticket info */ |
| 2195 | xfs_warn(mp, "transaction summary:"); |
Brian Foster | 2c8f626 | 2018-01-08 10:41:35 -0800 | [diff] [blame] | 2196 | xfs_warn(mp, " log res = %d", tp->t_log_res); |
| 2197 | xfs_warn(mp, " log count = %d", tp->t_log_count); |
| 2198 | xfs_warn(mp, " flags = 0x%x", tp->t_flags); |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2199 | |
| 2200 | xlog_print_tic_res(mp, tp->t_ticket); |
| 2201 | |
| 2202 | /* dump each log item */ |
Dave Chinner | e6631f8 | 2018-05-09 07:49:37 -0700 | [diff] [blame] | 2203 | list_for_each_entry(lip, &tp->t_items, li_trans) { |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2204 | struct xfs_log_vec *lv = lip->li_lv; |
| 2205 | struct xfs_log_iovec *vec; |
| 2206 | int i; |
| 2207 | |
| 2208 | xfs_warn(mp, "log item: "); |
| 2209 | xfs_warn(mp, " type = 0x%x", lip->li_type); |
Dave Chinner | 22525c1 | 2018-05-09 07:47:34 -0700 | [diff] [blame] | 2210 | xfs_warn(mp, " flags = 0x%lx", lip->li_flags); |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2211 | if (!lv) |
| 2212 | continue; |
| 2213 | xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); |
| 2214 | xfs_warn(mp, " size = %d", lv->lv_size); |
| 2215 | xfs_warn(mp, " bytes = %d", lv->lv_bytes); |
| 2216 | xfs_warn(mp, " buf len = %d", lv->lv_buf_len); |
| 2217 | |
| 2218 | /* dump each iovec for the log item */ |
| 2219 | vec = lv->lv_iovecp; |
| 2220 | for (i = 0; i < lv->lv_niovecs; i++) { |
| 2221 | int dumplen = min(vec->i_len, 32); |
| 2222 | |
| 2223 | xfs_warn(mp, " iovec[%d]", i); |
| 2224 | xfs_warn(mp, " type = 0x%x", vec->i_type); |
| 2225 | xfs_warn(mp, " len = %d", vec->i_len); |
| 2226 | xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i); |
kbuild test robot | 244e3de | 2017-06-26 08:54:16 -0700 | [diff] [blame] | 2227 | xfs_hex_dump(vec->i_addr, dumplen); |
Brian Foster | d4ca1d5 | 2017-06-14 21:29:50 -0700 | [diff] [blame] | 2228 | |
| 2229 | vec++; |
| 2230 | } |
| 2231 | } |
| 2232 | } |
| 2233 | |
| 2234 | /* |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2235 | * Calculate the potential space needed by the log vector. We may need a start |
| 2236 | * record, and each region gets its own struct xlog_op_header and may need to be |
| 2237 | * double word aligned. |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2238 | */ |
| 2239 | static int |
| 2240 | xlog_write_calc_vec_length( |
| 2241 | struct xlog_ticket *ticket, |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2242 | struct xfs_log_vec *log_vector, |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2243 | uint optype) |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2244 | { |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2245 | struct xfs_log_vec *lv; |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2246 | int headers = 0; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2247 | int len = 0; |
| 2248 | int i; |
| 2249 | |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2250 | if (optype & XLOG_START_TRANS) |
| 2251 | headers++; |
| 2252 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2253 | for (lv = log_vector; lv; lv = lv->lv_next) { |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2254 | /* we don't write ordered log vectors */ |
| 2255 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) |
| 2256 | continue; |
| 2257 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2258 | headers += lv->lv_niovecs; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2259 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2260 | for (i = 0; i < lv->lv_niovecs; i++) { |
| 2261 | struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; |
| 2262 | |
| 2263 | len += vecp->i_len; |
| 2264 | xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); |
| 2265 | } |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2266 | } |
| 2267 | |
| 2268 | ticket->t_res_num_ophdrs += headers; |
| 2269 | len += headers * sizeof(struct xlog_op_header); |
| 2270 | |
| 2271 | return len; |
| 2272 | } |
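/*
 * Back-of-the-envelope model of the length calculation above: each region
 * contributes its payload plus one op header, and a start record adds one
 * more op header. The 12 byte header size matches the on-disk
 * struct xlog_op_header; the region lengths are made up for illustration.
 */
#define MODEL_OPHDR_SIZE	12

static int
model_vec_length(const int *region_lens, int nregions, int start_rec)
{
	int len = start_rec ? MODEL_OPHDR_SIZE : 0;
	int i;

	for (i = 0; i < nregions; i++)
		len += MODEL_OPHDR_SIZE + region_lens[i];

	return len;	/* e.g. regions {128, 256} + start rec -> 420 bytes */
}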
| 2273 | |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2274 | static void |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2275 | xlog_write_start_rec( |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2276 | struct xlog_op_header *ophdr, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2277 | struct xlog_ticket *ticket) |
| 2278 | { |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2279 | ophdr->oh_tid = cpu_to_be32(ticket->t_tid); |
| 2280 | ophdr->oh_clientid = ticket->t_clientid; |
| 2281 | ophdr->oh_len = 0; |
| 2282 | ophdr->oh_flags = XLOG_START_TRANS; |
| 2283 | ophdr->oh_res2 = 0; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2284 | } |
| 2285 | |
| 2286 | static xlog_op_header_t * |
| 2287 | xlog_write_setup_ophdr( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 2288 | struct xlog *log, |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2289 | struct xlog_op_header *ophdr, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2290 | struct xlog_ticket *ticket, |
| 2291 | uint flags) |
| 2292 | { |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2293 | ophdr->oh_tid = cpu_to_be32(ticket->t_tid); |
| 2294 | ophdr->oh_clientid = ticket->t_clientid; |
| 2295 | ophdr->oh_res2 = 0; |
| 2296 | |
| 2297 | /* are we copying a commit or unmount record? */ |
| 2298 | ophdr->oh_flags = flags; |
| 2299 | |
| 2300 | /* |
| 2301 | * We've seen logs corrupted with bad transaction client ids. This |
| 2302 | * makes sure that XFS doesn't generate them. Turn this into an EIO |
| 2303 | * and shut down the filesystem. |
| 2304 | */ |
| 2305 | switch (ophdr->oh_clientid) { |
| 2306 | case XFS_TRANSACTION: |
| 2307 | case XFS_VOLUME: |
| 2308 | case XFS_LOG: |
| 2309 | break; |
| 2310 | default: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2311 | xfs_warn(log->l_mp, |
Darrick J. Wong | c969004 | 2018-01-09 12:02:55 -0800 | [diff] [blame] | 2312 | "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2313 | ophdr->oh_clientid, ticket); |
| 2314 | return NULL; |
| 2315 | } |
| 2316 | |
| 2317 | return ophdr; |
| 2318 | } |
| 2319 | |
| 2320 | /* |
| 2321 | * Set up the parameters of the region copy into the log. This has |
| 2322 | * to handle region write split across multiple log buffers - this |
| 2323 | * state is kept external to this function so that this code can |
Zhi Yong Wu | ac0e300 | 2013-08-07 10:11:02 +0000 | [diff] [blame] | 2324 | * be written in an obvious, self-documenting manner. |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2325 | */ |
| 2326 | static int |
| 2327 | xlog_write_setup_copy( |
| 2328 | struct xlog_ticket *ticket, |
| 2329 | struct xlog_op_header *ophdr, |
| 2330 | int space_available, |
| 2331 | int space_required, |
| 2332 | int *copy_off, |
| 2333 | int *copy_len, |
| 2334 | int *last_was_partial_copy, |
| 2335 | int *bytes_consumed) |
| 2336 | { |
| 2337 | int still_to_copy; |
| 2338 | |
| 2339 | still_to_copy = space_required - *bytes_consumed; |
| 2340 | *copy_off = *bytes_consumed; |
| 2341 | |
| 2342 | if (still_to_copy <= space_available) { |
| 2343 | /* write of region completes here */ |
| 2344 | *copy_len = still_to_copy; |
| 2345 | ophdr->oh_len = cpu_to_be32(*copy_len); |
| 2346 | if (*last_was_partial_copy) |
| 2347 | ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); |
| 2348 | *last_was_partial_copy = 0; |
| 2349 | *bytes_consumed = 0; |
| 2350 | return 0; |
| 2351 | } |
| 2352 | |
| 2353 | /* partial write of region, needs extra log op header reservation */ |
| 2354 | *copy_len = space_available; |
| 2355 | ophdr->oh_len = cpu_to_be32(*copy_len); |
| 2356 | ophdr->oh_flags |= XLOG_CONTINUE_TRANS; |
| 2357 | if (*last_was_partial_copy) |
| 2358 | ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; |
| 2359 | *bytes_consumed += *copy_len; |
| 2360 | (*last_was_partial_copy)++; |
| 2361 | |
| 2362 | /* account for new log op header */ |
| 2363 | ticket->t_curr_res -= sizeof(struct xlog_op_header); |
| 2364 | ticket->t_res_num_ophdrs++; |
| 2365 | |
| 2366 | return sizeof(struct xlog_op_header); |
| 2367 | } |
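/*
 * Worked example of the state machine above (illustrative numbers): a 1000
 * byte region written when only 400 bytes remain free in the iclog.
 *
 *	1st call: space_available = 400, still_to_copy = 1000
 *		-> copy_off = 0, copy_len = 400, XLOG_CONTINUE_TRANS set,
 *		   bytes_consumed = 400, one extra op header reserved
 *	2nd call: space_available >= 600, still_to_copy = 600
 *		-> copy_off = 400, copy_len = 600,
 *		   XLOG_END_TRANS | XLOG_WAS_CONT_TRANS set, state reset to 0
 */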
| 2368 | |
| 2369 | static int |
| 2370 | xlog_write_copy_finish( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 2371 | struct xlog *log, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2372 | struct xlog_in_core *iclog, |
| 2373 | uint flags, |
| 2374 | int *record_cnt, |
| 2375 | int *data_cnt, |
| 2376 | int *partial_copy, |
| 2377 | int *partial_copy_len, |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 2378 | int log_offset) |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2379 | { |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2380 | int error; |
| 2381 | |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2382 | if (*partial_copy) { |
| 2383 | /* |
| 2384 | * This iclog has already been marked WANT_SYNC by |
| 2385 | * xlog_state_get_iclog_space. |
| 2386 | */ |
Christoph Hellwig | 390aab0 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2387 | spin_lock(&log->l_icloglock); |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2388 | xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); |
| 2389 | *record_cnt = 0; |
| 2390 | *data_cnt = 0; |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2391 | goto release_iclog; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2392 | } |
| 2393 | |
| 2394 | *partial_copy = 0; |
| 2395 | *partial_copy_len = 0; |
| 2396 | |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 2397 | if (iclog->ic_size - log_offset > sizeof(xlog_op_header_t)) |
| 2398 | return 0; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2399 | |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 2400 | /* no more space in this iclog - push it. */ |
| 2401 | spin_lock(&log->l_icloglock); |
| 2402 | xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); |
| 2403 | *record_cnt = 0; |
| 2404 | *data_cnt = 0; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2405 | |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 2406 | if (iclog->ic_state == XLOG_STATE_ACTIVE) |
| 2407 | xlog_state_switch_iclogs(log, iclog, 0); |
| 2408 | else |
| 2409 | ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || |
| 2410 | xlog_is_shutdown(log)); |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2411 | release_iclog: |
Dave Chinner | 0dc8f7f | 2021-07-27 16:23:48 -0700 | [diff] [blame] | 2412 | error = xlog_state_release_iclog(log, iclog, 0); |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2413 | spin_unlock(&log->l_icloglock); |
| 2414 | return error; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2415 | } |
| 2416 | |
| 2417 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2418 | * Write some region out to in-core log |
| 2419 | * |
| 2420 | * This will be called when writing externally provided regions or when |
| 2421 | * writing out a commit record for a given transaction. |
| 2422 | * |
| 2423 | * General algorithm: |
| 2424 | * 1. Find total length of this write. This may include adding to the |
| 2425 | * lengths passed in. |
| 2426 | * 2. Check whether we violate the tickets reservation. |
| 2427 | * 3. While writing to this iclog |
| 2428 | * A. Reserve as much space in this iclog as we can get |
| 2429 | * B. If this is first write, save away start lsn |
| 2430 | * C. While writing this region: |
| 2431 | * 1. If first write of transaction, write start record |
| 2432 | * 2. Write log operation header (header per region) |
| 2433 | * 3. Find out if we can fit entire region into this iclog |
| 2434 | * 4. Potentially, verify destination memcpy ptr |
| 2435 | * 5. Memcpy (partial) region |
| 2436 | * 6. If partial copy, release iclog; otherwise, continue |
| 2437 | * copying more regions into current iclog |
| 2438 | * 4. Mark want sync bit (in simulation mode) |
| 2439 | * 5. Release iclog for potential flush to on-disk log. |
| 2440 | * |
| 2441 | * ERRORS: |
| 2442 | * 1. Panic if reservation is overrun. This should never happen since |
| 2443 | * reservation amounts are generated internal to the filesystem. |
| 2444 | * NOTES: |
| 2445 | * 1. Tickets are single threaded data structures. |
| 2446 | * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the |
| 2447 | * syncing routine. When a single log_write region needs to span |
| 2448 | * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set |
| 2449 | * on all log operation writes which don't contain the end of the |
| 2450 | * region. The XLOG_END_TRANS bit is used for the in-core log |
| 2451 | * operation which contains the end of the continued log_write region. |
| 2452 | * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, |
| 2453 | * we don't really know exactly how much space will be used. As a result, |
| 2454 | * we don't update ic_offset until the end when we know exactly how many |
| 2455 | * bytes have been written out. |
| 2456 | */ |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 2457 | int |
Christoph Hellwig | 35a8a72 | 2010-02-15 23:34:54 +0000 | [diff] [blame] | 2458 | xlog_write( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 2459 | struct xlog *log, |
Dave Chinner | c45aba4 | 2021-08-10 18:00:42 -0700 | [diff] [blame] | 2460 | struct xfs_cil_ctx *ctx, |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2461 | struct xfs_log_vec *log_vector, |
Christoph Hellwig | 35a8a72 | 2010-02-15 23:34:54 +0000 | [diff] [blame] | 2462 | struct xlog_ticket *ticket, |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2463 | uint optype) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2464 | { |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2465 | struct xlog_in_core *iclog = NULL; |
Dave Chinner | 9590e9c | 2020-03-25 18:18:21 -0700 | [diff] [blame] | 2466 | struct xfs_log_vec *lv = log_vector; |
| 2467 | struct xfs_log_iovec *vecp = lv->lv_iovecp; |
| 2468 | int index = 0; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2469 | int len; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2470 | int partial_copy = 0; |
| 2471 | int partial_copy_len = 0; |
| 2472 | int contwr = 0; |
| 2473 | int record_cnt = 0; |
| 2474 | int data_cnt = 0; |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2475 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2476 | |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 2477 | /* |
Dave Chinner | 9590e9c | 2020-03-25 18:18:21 -0700 | [diff] [blame] | 2478 | * If this is a commit or unmount transaction, we don't need a start |
| 2479 | * record to be written. We do, however, have to account for the |
| 2480 | * commit or unmount header that gets written. Hence we always have |
| 2481 | * to account for an extra xlog_op_header here. |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 2482 | */ |
Dave Chinner | 9590e9c | 2020-03-25 18:18:21 -0700 | [diff] [blame] | 2483 | ticket->t_curr_res -= sizeof(struct xlog_op_header); |
Brian Foster | 7d2d5653 | 2017-06-14 21:29:48 -0700 | [diff] [blame] | 2484 | if (ticket->t_curr_res < 0) { |
| 2485 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, |
| 2486 | "ctx ticket reservation ran out. Need to up reservation"); |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2487 | xlog_print_tic_res(log->l_mp, ticket); |
Brian Foster | 7d2d5653 | 2017-06-14 21:29:48 -0700 | [diff] [blame] | 2488 | xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); |
| 2489 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2490 | |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2491 | len = xlog_write_calc_vec_length(ticket, log_vector, optype); |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2492 | while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2493 | void *ptr; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2494 | int log_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2495 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2496 | error = xlog_state_get_iclog_space(log, len, &iclog, ticket, |
| 2497 | &contwr, &log_offset); |
| 2498 | if (error) |
| 2499 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2500 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2501 | ASSERT(log_offset <= iclog->ic_size - 1); |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2502 | ptr = iclog->ic_datap + log_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2503 | |
Dave Chinner | c45aba4 | 2021-08-10 18:00:42 -0700 | [diff] [blame] | 2504 | /* |
| 2505 | * If we have a context pointer, pass it the first iclog we are |
| 2506 | * writing to so it can record state needed for iclog write |
| 2507 | * ordering. |
| 2508 | */ |
| 2509 | if (ctx) { |
| 2510 | xlog_cil_set_ctx_write_state(ctx, iclog); |
| 2511 | ctx = NULL; |
| 2512 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2513 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2514 | /* |
| 2515 | * This loop writes out as many regions as can fit in the amount |
| 2516 | * of space which was allocated by xlog_state_get_iclog_space(). |
| 2517 | */ |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2518 | while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { |
| 2519 | struct xfs_log_iovec *reg; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2520 | struct xlog_op_header *ophdr; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2521 | int copy_len; |
| 2522 | int copy_off; |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2523 | bool ordered = false; |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2524 | bool wrote_start_rec = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2525 | |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2526 | /* ordered log vectors have no regions to write */ |
| 2527 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { |
| 2528 | ASSERT(lv->lv_niovecs == 0); |
| 2529 | ordered = true; |
| 2530 | goto next_lv; |
| 2531 | } |
| 2532 | |
| 2533 | reg = &vecp[index]; |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 2534 | ASSERT(reg->i_len % sizeof(int32_t) == 0); |
| 2535 | ASSERT((unsigned long)ptr % sizeof(int32_t) == 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2536 | |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2537 | /* |
| 2538 | * Before we start formatting log vectors, we need to |
| 2539 | * write a start record. Only do this for the first |
| 2540 | * iclog we write to. |
| 2541 | */ |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2542 | if (optype & XLOG_START_TRANS) { |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2543 | xlog_write_start_rec(ptr, ticket); |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2544 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2545 | sizeof(struct xlog_op_header)); |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2546 | optype &= ~XLOG_START_TRANS; |
| 2547 | wrote_start_rec = true; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2548 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2549 | |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2550 | ophdr = xlog_write_setup_ophdr(log, ptr, ticket, optype); |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2551 | if (!ophdr) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2552 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2553 | |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2554 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2555 | sizeof(struct xlog_op_header)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2556 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2557 | len += xlog_write_setup_copy(ticket, ophdr, |
| 2558 | iclog->ic_size-log_offset, |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2559 | reg->i_len, |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2560 | ©_off, ©_len, |
| 2561 | &partial_copy, |
| 2562 | &partial_copy_len); |
| 2563 | xlog_verify_dest_ptr(log, ptr); |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2564 | |
Eric Sandeen | 91f9f5f | 2015-10-12 16:04:15 +1100 | [diff] [blame] | 2565 | /* |
| 2566 | * Copy region. |
| 2567 | * |
| 2568 | * Unmount records just log an opheader, so can have |
| 2569 | * empty payloads with no data region to copy. Hence we |
| 2570 | * only copy the payload if the vector says it has data |
| 2571 | * to copy. |
| 2572 | */ |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2573 | ASSERT(copy_len >= 0); |
Eric Sandeen | 91f9f5f | 2015-10-12 16:04:15 +1100 | [diff] [blame] | 2574 | if (copy_len > 0) { |
| 2575 | memcpy(ptr, reg->i_addr + copy_off, copy_len); |
| 2576 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
| 2577 | copy_len); |
| 2578 | } |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2579 | copy_len += sizeof(struct xlog_op_header); |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2580 | record_cnt++; |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2581 | if (wrote_start_rec) { |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2582 | copy_len += sizeof(struct xlog_op_header); |
| 2583 | record_cnt++; |
Dave Chinner | 7ec9492 | 2020-03-25 18:18:20 -0700 | [diff] [blame] | 2584 | } |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2585 | data_cnt += contwr ? copy_len : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2586 | |
Dave Chinner | 3468bb1 | 2021-06-18 08:21:50 -0700 | [diff] [blame] | 2587 | error = xlog_write_copy_finish(log, iclog, optype, |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2588 | &record_cnt, &data_cnt, |
| 2589 | &partial_copy, |
| 2590 | &partial_copy_len, |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 2591 | log_offset); |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2592 | if (error) |
| 2593 | return error; |
| 2594 | |
| 2595 | /* |
| 2596 | * if we had a partial copy, we need to get more iclog |
| 2597 | * space but we don't want to increment the region |
| 2598 | * index because there is still more in this region to |
| 2599 | * write. |
| 2600 | * |
| 2601 | * If we completed writing this region, and we flushed |
| 2602 | * the iclog (indicated by resetting of the record |
| 2603 | * count), then we also need to get more log space. If |
| 2604 | * this was the last record, though, we are done and |
| 2605 | * can just return. |
| 2606 | */ |
| 2607 | if (partial_copy) |
| 2608 | break; |
| 2609 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2610 | if (++index == lv->lv_niovecs) { |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2611 | next_lv: |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2612 | lv = lv->lv_next; |
| 2613 | index = 0; |
| 2614 | if (lv) |
| 2615 | vecp = lv->lv_iovecp; |
| 2616 | } |
Thomas Meyer | 749f24f | 2017-10-09 11:38:54 -0700 | [diff] [blame] | 2617 | if (record_cnt == 0 && !ordered) { |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2618 | if (!lv) |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2619 | return 0; |
| 2620 | break; |
| 2621 | } |
| 2622 | } |
| 2623 | } |
| 2624 | |
| 2625 | ASSERT(len == 0); |
| 2626 | |
Christoph Hellwig | 390aab0 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2627 | spin_lock(&log->l_icloglock); |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2628 | xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); |
Dave Chinner | caa8009 | 2021-08-10 18:00:43 -0700 | [diff] [blame] | 2629 | error = xlog_state_release_iclog(log, iclog, 0); |
Christoph Hellwig | 390aab0 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2630 | spin_unlock(&log->l_icloglock); |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2631 | |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 2632 | return error; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2633 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2634 | |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2635 | static void |
| 2636 | xlog_state_activate_iclog( |
| 2637 | struct xlog_in_core *iclog, |
| 2638 | int *iclogs_changed) |
| 2639 | { |
| 2640 | ASSERT(list_empty_careful(&iclog->ic_callbacks)); |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 2641 | trace_xlog_iclog_activate(iclog, _RET_IP_); |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2642 | |
| 2643 | /* |
| 2644 | * If the number of ops in this iclog indicates it just contains the |
| 2645 | * dummy transaction, we can change state into IDLE (the second time |
| 2646 | * around). Otherwise we should change the state to NEED a dummy. |
| 2647 | * We don't need to cover the dummy. |
| 2648 | */ |
| 2649 | if (*iclogs_changed == 0 && |
| 2650 | iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { |
| 2651 | *iclogs_changed = 1; |
| 2652 | } else { |
| 2653 | /* |
| 2654 | * We have two dirty iclogs so start over. This could also be |
| 2655 | * num of ops indicating this is not the dummy going out. |
| 2656 | */ |
| 2657 | *iclogs_changed = 2; |
| 2658 | } |
| 2659 | |
| 2660 | iclog->ic_state = XLOG_STATE_ACTIVE; |
| 2661 | iclog->ic_offset = 0; |
| 2662 | iclog->ic_header.h_num_logops = 0; |
| 2663 | memset(iclog->ic_header.h_cycle_data, 0, |
| 2664 | sizeof(iclog->ic_header.h_cycle_data)); |
| 2665 | iclog->ic_header.h_lsn = 0; |
Dave Chinner | 9d11001 | 2021-07-28 17:14:11 -0700 | [diff] [blame] | 2666 | iclog->ic_header.h_tail_lsn = 0; |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2667 | } |
| 2668 | |
Dave Chinner | 0383f54 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2669 | /* |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2670 | * Loop through all iclogs and mark all iclogs currently marked DIRTY as |
| 2671 | * ACTIVE after iclog I/O has completed. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2672 | */ |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2673 | static void |
| 2674 | xlog_state_activate_iclogs( |
| 2675 | struct xlog *log, |
| 2676 | int *iclogs_changed) |
| 2677 | { |
| 2678 | struct xlog_in_core *iclog = log->l_iclog; |
| 2679 | |
| 2680 | do { |
| 2681 | if (iclog->ic_state == XLOG_STATE_DIRTY) |
| 2682 | xlog_state_activate_iclog(iclog, iclogs_changed); |
| 2683 | /* |
| 2684 | * The ordering of marking iclogs ACTIVE must be maintained, so |
| 2685 | * an iclog doesn't become ACTIVE beyond one that is SYNCING. |
| 2686 | */ |
| 2687 | else if (iclog->ic_state != XLOG_STATE_ACTIVE) |
| 2688 | break; |
| 2689 | } while ((iclog = iclog->ic_next) != log->l_iclog); |
| 2690 | } |
| 2691 | |
| 2692 | static int |
| 2693 | xlog_covered_state( |
| 2694 | int prev_state, |
| 2695 | int iclogs_changed) |
| 2696 | { |
| 2697 | /* |
Brian Foster | b0eb9e1 | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 2698 | * We go to NEED for any non-covering writes. We go to NEED2 if we just |
| 2699 | * wrote the first covering record (DONE). We go to IDLE if we just |
| 2700 | * wrote the second covering record (DONE2) and remain in IDLE until a |
| 2701 | * non-covering write occurs. |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2702 | */ |
| 2703 | switch (prev_state) { |
| 2704 | case XLOG_STATE_COVER_IDLE: |
Brian Foster | b0eb9e1 | 2021-01-22 16:48:22 -0800 | [diff] [blame] | 2705 | if (iclogs_changed == 1) |
| 2706 | return XLOG_STATE_COVER_IDLE; |
Gustavo A. R. Silva | 53004ee | 2021-04-20 17:54:36 -0500 | [diff] [blame] | 2707 | fallthrough; |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2708 | case XLOG_STATE_COVER_NEED: |
| 2709 | case XLOG_STATE_COVER_NEED2: |
| 2710 | break; |
| 2711 | case XLOG_STATE_COVER_DONE: |
| 2712 | if (iclogs_changed == 1) |
| 2713 | return XLOG_STATE_COVER_NEED2; |
| 2714 | break; |
| 2715 | case XLOG_STATE_COVER_DONE2: |
| 2716 | if (iclogs_changed == 1) |
| 2717 | return XLOG_STATE_COVER_IDLE; |
| 2718 | break; |
| 2719 | default: |
| 2720 | ASSERT(0); |
| 2721 | } |
| 2722 | |
| 2723 | return XLOG_STATE_COVER_NEED; |
| 2724 | } |
| 2725 | |
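/*
 * Editor's sketch (not part of the original source): the covering state
 * machine as driven from xlog_state_clean_iclog() below. Covering an idle
 * log takes two dummy records; each dummy that cleans exactly one iclog
 * (iclogs_changed == 1) advances the state, while any real write
 * (iclogs_changed == 2) falls back to NEED. The NEED -> DONE and
 * NEED2 -> DONE2 transitions are assumed to be made elsewhere, when the
 * log covering worker issues the dummy record:
 *
 *	NEED  --dummy issued--> DONE  --cleaned (changed == 1)--> NEED2
 *	NEED2 --dummy issued--> DONE2 --cleaned (changed == 1)--> IDLE
 *	IDLE  --real write (changed == 2)--> NEED
 */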
Christoph Hellwig | ba0f32d | 2005-06-21 15:36:52 +1000 | [diff] [blame] | 2726 | STATIC void |
Dave Chinner | 0383f54 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2727 | xlog_state_clean_iclog( |
| 2728 | struct xlog *log, |
| 2729 | struct xlog_in_core *dirty_iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2730 | { |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2731 | int iclogs_changed = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2732 | |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 2733 | trace_xlog_iclog_clean(dirty_iclog, _RET_IP_); |
| 2734 | |
Christoph Hellwig | 5781464 | 2020-03-20 08:49:21 -0700 | [diff] [blame] | 2735 | dirty_iclog->ic_state = XLOG_STATE_DIRTY; |
Dave Chinner | 0383f54 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2736 | |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2737 | xlog_state_activate_iclogs(log, &iclogs_changed); |
Dave Chinner | 0383f54 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2738 | wake_up_all(&dirty_iclog->ic_force_wait); |
| 2739 | |
Christoph Hellwig | c814b4f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2740 | if (iclogs_changed) { |
| 2741 | log->l_covered_state = xlog_covered_state(log->l_covered_state, |
| 2742 | iclogs_changed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2743 | } |
Dave Chinner | 0383f54 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2744 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2745 | |
| 2746 | STATIC xfs_lsn_t |
| 2747 | xlog_get_lowest_lsn( |
Christoph Hellwig | 9bff3132 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 2748 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2749 | { |
Christoph Hellwig | 9bff3132 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 2750 | struct xlog_in_core *iclog = log->l_iclog; |
| 2751 | xfs_lsn_t lowest_lsn = 0, lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2752 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2753 | do { |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2754 | if (iclog->ic_state == XLOG_STATE_ACTIVE || |
| 2755 | iclog->ic_state == XLOG_STATE_DIRTY) |
Christoph Hellwig | 9bff3132 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 2756 | continue; |
| 2757 | |
| 2758 | lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
| 2759 | if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2760 | lowest_lsn = lsn; |
Christoph Hellwig | 9bff3132 | 2019-06-28 19:27:20 -0700 | [diff] [blame] | 2761 | } while ((iclog = iclog->ic_next) != log->l_iclog); |
| 2762 | |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 2763 | return lowest_lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2764 | } |
| 2765 | |
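/*
 * Editor's sketch: XFS_LSN_CMP() orders LSNs by (cycle, block). With three
 * iclogs - one ACTIVE, one SYNCING at h_lsn (cycle 5, block 100), and one
 * DONE_SYNC at h_lsn (cycle 5, block 40) - xlog_get_lowest_lsn() ignores
 * the ACTIVE iclog and returns (5, 40), the oldest iclog still in flight.
 */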
Dave Chinner | 6546818 | 2019-09-05 17:32:50 -0700 | [diff] [blame] | 2766 | /* |
Dave Chinner | 14e15f1 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2767 |  * Completion of an iclog IO does not imply that a transaction has completed, as |
| 2768 | * transactions can be large enough to span many iclogs. We cannot change the |
| 2769 | * tail of the log half way through a transaction as this may be the only |
| 2770 | * transaction in the log and moving the tail to point to the middle of it |
| 2771 | * will prevent recovery from finding the start of the transaction. Hence we |
| 2772 | * should only update the last_sync_lsn if this iclog contains transaction |
| 2773 | * completion callbacks on it. |
| 2774 | * |
| 2775 | * We have to do this before we drop the icloglock to ensure we are the only one |
| 2776 | * that can update it. |
| 2777 | * |
| 2778 | * If we are moving the last_sync_lsn forwards, we also need to ensure we kick |
| 2779 | * the reservation grant head pushing. This is due to the fact that the push |
| 2780 | * target is bound by the current last_sync_lsn value. Hence if we have a large |
| 2781 | * amount of log space bound up in this committing transaction then the |
| 2782 | * last_sync_lsn value may be the limiting factor preventing tail pushing from |
| 2783 | * freeing space in the log. Hence once we've updated the last_sync_lsn we |
| 2784 | * should push the AIL to ensure the push target (and hence the grant head) is |
| 2785 | * no longer bound by the old log head location and can move forwards and make |
| 2786 | * progress again. |
| 2787 | */ |
| 2788 | static void |
| 2789 | xlog_state_set_callback( |
| 2790 | struct xlog *log, |
| 2791 | struct xlog_in_core *iclog, |
| 2792 | xfs_lsn_t header_lsn) |
| 2793 | { |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 2794 | trace_xlog_iclog_callback(iclog, _RET_IP_); |
Dave Chinner | 14e15f1 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2795 | iclog->ic_state = XLOG_STATE_CALLBACK; |
| 2796 | |
| 2797 | ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), |
| 2798 | header_lsn) <= 0); |
| 2799 | |
| 2800 | if (list_empty_careful(&iclog->ic_callbacks)) |
| 2801 | return; |
| 2802 | |
| 2803 | atomic64_set(&log->l_last_sync_lsn, header_lsn); |
| 2804 | xlog_grant_push_ail(log, 0); |
| 2805 | } |
| 2806 | |
| 2807 | /* |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2808 | * Return true if we need to stop processing, false to continue to the next |
| 2809 | * iclog. The caller will need to run callbacks if the iclog is returned in the |
| 2810 | * XLOG_STATE_CALLBACK state. |
| 2811 | */ |
| 2812 | static bool |
| 2813 | xlog_state_iodone_process_iclog( |
| 2814 | struct xlog *log, |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 2815 | struct xlog_in_core *iclog) |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2816 | { |
| 2817 | xfs_lsn_t lowest_lsn; |
Dave Chinner | 14e15f1 | 2019-09-05 17:32:52 -0700 | [diff] [blame] | 2818 | xfs_lsn_t header_lsn; |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2819 | |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2820 | switch (iclog->ic_state) { |
| 2821 | case XLOG_STATE_ACTIVE: |
| 2822 | case XLOG_STATE_DIRTY: |
| 2823 | /* |
| 2824 | * Skip all iclogs in the ACTIVE & DIRTY states: |
| 2825 | */ |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2826 | return false; |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2827 | case XLOG_STATE_DONE_SYNC: |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2828 | /* |
Christoph Hellwig | 4b29ab0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2829 | * Now that we have an iclog that is in the DONE_SYNC state, do |
| 2830 | * one more check here to see if we have chased our tail around. |
| 2831 | * If this is not the lowest lsn iclog, then we will leave it |
| 2832 | * for another completion to process. |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2833 | */ |
| 2834 | header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
| 2835 | lowest_lsn = xlog_get_lowest_lsn(log); |
| 2836 | if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0) |
| 2837 | return false; |
| 2838 | xlog_state_set_callback(log, iclog, header_lsn); |
| 2839 | return false; |
| 2840 | default: |
| 2841 | /* |
| 2842 | * Can only perform callbacks in order. Since this iclog is not |
Christoph Hellwig | 4b29ab0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2843 | * in the DONE_SYNC state, we skip the rest and just try to |
| 2844 | * clean up. |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 2845 | */ |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2846 | return true; |
| 2847 | } |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2848 | } |
| 2849 | |
Dave Chinner | 8bb9200 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2850 | /* |
| 2851 | * Loop over all the iclogs, running attached callbacks on them. Return true if |
Dave Chinner | aad7272 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2852 | * we ran any callbacks, indicating that we dropped the icloglock. We don't need |
| 2853 | * to handle transient shutdown state here at all because |
| 2854 | * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown |
| 2855 | * cleanup of the callbacks. |
Dave Chinner | 8bb9200 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2856 | */ |
| 2857 | static bool |
| 2858 | xlog_state_do_iclog_callbacks( |
| 2859 | struct xlog *log) |
| 2860 | __releases(&log->l_icloglock) |
| 2861 | __acquires(&log->l_icloglock) |
| 2862 | { |
| 2863 | struct xlog_in_core *first_iclog = log->l_iclog; |
| 2864 | struct xlog_in_core *iclog = first_iclog; |
| 2865 | bool ran_callback = false; |
| 2866 | |
| 2867 | do { |
| 2868 | LIST_HEAD(cb_list); |
| 2869 | |
Dave Chinner | aad7272 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2870 | if (xlog_state_iodone_process_iclog(log, iclog)) |
| 2871 | break; |
| 2872 | if (iclog->ic_state != XLOG_STATE_CALLBACK) { |
| 2873 | iclog = iclog->ic_next; |
| 2874 | continue; |
Dave Chinner | 8bb9200 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2875 | } |
| 2876 | list_splice_init(&iclog->ic_callbacks, &cb_list); |
| 2877 | spin_unlock(&log->l_icloglock); |
| 2878 | |
| 2879 | trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); |
| 2880 | xlog_cil_process_committed(&cb_list); |
| 2881 | trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); |
| 2882 | ran_callback = true; |
| 2883 | |
| 2884 | spin_lock(&log->l_icloglock); |
Dave Chinner | aad7272 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2885 | xlog_state_clean_iclog(log, iclog); |
Dave Chinner | 8bb9200 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2886 | iclog = iclog->ic_next; |
| 2887 | } while (iclog != first_iclog); |
| 2888 | |
| 2889 | return ran_callback; |
| 2890 | } |
| 2891 | |
| 2892 | |
| 2893 | /* |
| 2894 | * Loop running iclog completion callbacks until there are no more iclogs in a |
| 2895 | * state that can run callbacks. |
| 2896 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2897 | STATIC void |
| 2898 | xlog_state_do_callback( |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2899 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2900 | { |
Dave Chinner | 5e96fa8 | 2019-09-05 17:32:51 -0700 | [diff] [blame] | 2901 | int flushcnt = 0; |
| 2902 | int repeats = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2903 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2904 | spin_lock(&log->l_icloglock); |
Dave Chinner | 8bb9200 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2905 | while (xlog_state_do_iclog_callbacks(log)) { |
| 2906 | if (xlog_is_shutdown(log)) |
| 2907 | break; |
Nathan Scott | a3c6685e | 2006-09-28 11:02:14 +1000 | [diff] [blame] | 2908 | |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 2909 | if (++repeats > 5000) { |
Nathan Scott | a3c6685e | 2006-09-28 11:02:14 +1000 | [diff] [blame] | 2910 | flushcnt += repeats; |
| 2911 | repeats = 0; |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2912 | xfs_warn(log->l_mp, |
Nathan Scott | a3c6685e | 2006-09-28 11:02:14 +1000 | [diff] [blame] | 2913 | "%s: possible infinite loop (%d iterations)", |
Harvey Harrison | 34a622b | 2008-04-10 12:19:21 +1000 | [diff] [blame] | 2914 | __func__, flushcnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2915 | } |
Dave Chinner | 8bb9200 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2916 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2917 | |
Dave Chinner | aad7272 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 2918 | if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE) |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 2919 | wake_up_all(&log->l_flush_wait); |
Rik van Riel | cdea545 | 2019-09-05 17:32:48 -0700 | [diff] [blame] | 2920 | |
| 2921 | spin_unlock(&log->l_icloglock); |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2922 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2923 | |
| 2924 | |
| 2925 | /* |
| 2926 | * Finish transitioning this iclog to the dirty state. |
| 2927 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2928 | * Callbacks could take time, so they are done outside the scope of the |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 2929 | * global state machine log lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 2931 | STATIC void |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2932 | xlog_state_done_syncing( |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2933 | struct xlog_in_core *iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2934 | { |
Christoph Hellwig | d15cbf2 | 2019-06-28 19:27:30 -0700 | [diff] [blame] | 2935 | struct xlog *log = iclog->ic_log; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2936 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2937 | spin_lock(&log->l_icloglock); |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 2938 | ASSERT(atomic_read(&iclog->ic_refcnt) == 0); |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 2939 | trace_xlog_iclog_sync_done(iclog, _RET_IP_); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2940 | |
| 2941 | /* |
| 2942 | * If we got an error, either on the first buffer, or in the case of |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2943 | * split log writes, on the second, we shut down the file system and |
 | 2944 |  * no further iclogs should ever be written to disk again. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2945 | */ |
Dave Chinner | 2039a27 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 2946 | if (!xlog_is_shutdown(log)) { |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2947 | ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2948 | iclog->ic_state = XLOG_STATE_DONE_SYNC; |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2949 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2950 | |
| 2951 | /* |
 | 2952 |  * Someone could be sleeping prior to writing out the next |
 | 2953 |  * iclog buffer. We wake them all; one will get to do the |
 | 2954 |  * I/O, the others get to wait for the result. |
| 2955 | */ |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 2956 | wake_up_all(&iclog->ic_write_wait); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2957 | spin_unlock(&log->l_icloglock); |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 2958 | xlog_state_do_callback(log); |
Christoph Hellwig | 12e6a0f | 2020-03-20 08:49:20 -0700 | [diff] [blame] | 2959 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2960 | |
| 2961 | /* |
| 2962 | * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 2963 | * sleep. We wait on the flush queue on the head iclog as that should be |
| 2964 | * the first iclog to complete flushing. Hence if all iclogs are syncing, |
| 2965 | * we will wait here and all new writes will sleep until a sync completes. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2966 | * |
| 2967 | * The in-core logs are used in a circular fashion. They are not used |
| 2968 | * out-of-order even when an iclog past the head is free. |
| 2969 | * |
| 2970 | * return: |
| 2971 | * * log_offset where xlog_write() can start writing into the in-core |
| 2972 | * log's data space. |
| 2973 | * * in-core log pointer to which xlog_write() should write. |
| 2974 | * * boolean indicating this is a continued write to an in-core log. |
| 2975 | * If this is the last write, then the in-core log's offset field |
| 2976 | * needs to be incremented, depending on the amount of data which |
| 2977 | * is copied. |
| 2978 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 2979 | STATIC int |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2980 | xlog_state_get_iclog_space( |
| 2981 | struct xlog *log, |
| 2982 | int len, |
| 2983 | struct xlog_in_core **iclogp, |
| 2984 | struct xlog_ticket *ticket, |
| 2985 | int *continued_write, |
| 2986 | int *logoffsetp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2987 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2988 | int log_offset; |
| 2989 | xlog_rec_header_t *head; |
| 2990 | xlog_in_core_t *iclog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2991 | |
| 2992 | restart: |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2993 | spin_lock(&log->l_icloglock); |
Dave Chinner | 2039a27 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 2994 | if (xlog_is_shutdown(log)) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2995 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2996 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2997 | } |
| 2998 | |
| 2999 | iclog = log->l_iclog; |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 3000 | if (iclog->ic_state != XLOG_STATE_ACTIVE) { |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3001 | XFS_STATS_INC(log->l_mp, xs_log_noiclogs); |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 3002 | |
| 3003 | /* Wait for log writes to have flushed */ |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 3004 | xlog_wait(&log->l_flush_wait, &log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3005 | goto restart; |
| 3006 | } |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 3007 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3008 | head = &iclog->ic_header; |
| 3009 | |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3010 | atomic_inc(&iclog->ic_refcnt); /* prevents sync */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3011 | log_offset = iclog->ic_offset; |
| 3012 | |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 3013 | trace_xlog_iclog_get_space(iclog, _RET_IP_); |
| 3014 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3015 | /* On the 1st write to an iclog, figure out lsn. This works |
| 3016 | * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are |
| 3017 | * committing to. If the offset is set, that's how many blocks |
| 3018 | * must be written. |
| 3019 | */ |
| 3020 | if (log_offset == 0) { |
| 3021 | ticket->t_curr_res -= log->l_iclog_hsize; |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 3022 | xlog_tic_add_region(ticket, |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 3023 | log->l_iclog_hsize, |
| 3024 | XLOG_REG_TYPE_LRHEADER); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3025 | head->h_cycle = cpu_to_be32(log->l_curr_cycle); |
| 3026 | head->h_lsn = cpu_to_be64( |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 3027 | xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3028 | ASSERT(log->l_curr_block >= 0); |
| 3029 | } |
| 3030 | |
| 3031 | /* If there is enough room to write everything, then do it. Otherwise, |
| 3032 | * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC |
| 3033 | * bit is on, so this will get flushed out. Don't update ic_offset |
| 3034 | * until you know exactly how many bytes get copied. Therefore, wait |
| 3035 | * until later to update ic_offset. |
| 3036 | * |
| 3037 | * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's |
| 3038 | * can fit into remaining data section. |
| 3039 | */ |
 | 3040 | 	if (iclog->ic_size - iclog->ic_offset < 2 * sizeof(xlog_op_header_t)) { |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 3041 | int error = 0; |
| 3042 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3043 | xlog_state_switch_iclogs(log, iclog, iclog->ic_size); |
| 3044 | |
Dave Chinner | 49641f1 | 2008-07-11 17:43:55 +1000 | [diff] [blame] | 3045 | /* |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 3046 | * If we are the only one writing to this iclog, sync it to |
| 3047 | * disk. We need to do an atomic compare and decrement here to |
| 3048 | * avoid racing with concurrent atomic_dec_and_lock() calls in |
Dave Chinner | 49641f1 | 2008-07-11 17:43:55 +1000 | [diff] [blame] | 3049 | * xlog_state_release_iclog() when there is more than one |
| 3050 | * reference to the iclog. |
| 3051 | */ |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 3052 | if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) |
Dave Chinner | 0dc8f7f | 2021-07-27 16:23:48 -0700 | [diff] [blame] | 3053 | error = xlog_state_release_iclog(log, iclog, 0); |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 3054 | spin_unlock(&log->l_icloglock); |
| 3055 | if (error) |
| 3056 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3057 | goto restart; |
| 3058 | } |
| 3059 | |
| 3060 | /* Do we have enough room to write the full amount in the remainder |
| 3061 | * of this iclog? Or must we continue a write on the next iclog and |
| 3062 | * mark this iclog as completely taken? In the case where we switch |
| 3063 | * iclogs (to mark it taken), this particular iclog will release/sync |
| 3064 | * to disk in xlog_write(). |
| 3065 | */ |
| 3066 | if (len <= iclog->ic_size - iclog->ic_offset) { |
| 3067 | *continued_write = 0; |
| 3068 | iclog->ic_offset += len; |
| 3069 | } else { |
| 3070 | *continued_write = 1; |
| 3071 | xlog_state_switch_iclogs(log, iclog, iclog->ic_size); |
| 3072 | } |
| 3073 | *iclogp = iclog; |
| 3074 | |
| 3075 | ASSERT(iclog->ic_offset <= iclog->ic_size); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3076 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3077 | |
| 3078 | *logoffsetp = log_offset; |
| 3079 | return 0; |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 3080 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3081 | |
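/*
 * Editor's sketch of the continued-write contract above, illustrative
 * numbers only: a caller asking for len == 6k when only 4k remains in the
 * current iclog gets *continued_write == 1 and the iclog is switched to
 * WANT_SYNC. The caller copies the 4k that fits, releases the iclog
 * (pushing it to disk), then calls back in here for space in the next
 * iclog to write the remaining 2k.
 */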
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3082 | /* |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 3083 | * The first cnt-1 times a ticket goes through here we don't need to move the |
| 3084 | * grant write head because the permanent reservation has reserved cnt times the |
| 3085 | * unit amount. Release part of current permanent unit reservation and reset |
 | 3086 |  * current reservation to be one unit's worth. Also move the grant reservation head |
| 3087 | * forward. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3088 | */ |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3089 | void |
| 3090 | xfs_log_ticket_regrant( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3091 | struct xlog *log, |
| 3092 | struct xlog_ticket *ticket) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3093 | { |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3094 | trace_xfs_log_ticket_regrant(log, ticket); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3095 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3096 | if (ticket->t_cnt > 0) |
| 3097 | ticket->t_cnt--; |
| 3098 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3099 | xlog_grant_sub_space(log, &log->l_reserve_head.grant, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3100 | ticket->t_curr_res); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3101 | xlog_grant_sub_space(log, &log->l_write_head.grant, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3102 | ticket->t_curr_res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3103 | ticket->t_curr_res = ticket->t_unit_res; |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 3104 | xlog_tic_reset_res(ticket); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3105 | |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3106 | trace_xfs_log_ticket_regrant_sub(log, ticket); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3107 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | /* just return if we still have some of the pre-reserved space */ |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3109 | if (!ticket->t_cnt) { |
| 3110 | xlog_grant_add_space(log, &log->l_reserve_head.grant, |
| 3111 | ticket->t_unit_res); |
| 3112 | trace_xfs_log_ticket_regrant_exit(log, ticket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3113 | |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3114 | ticket->t_curr_res = ticket->t_unit_res; |
| 3115 | xlog_tic_reset_res(ticket); |
| 3116 | } |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3117 | |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3118 | xfs_log_ticket_put(ticket); |
| 3119 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3120 | |
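/*
 * Editor's sketch, illustrative numbers only: for a permanent ticket with
 * t_cnt == 3 and t_unit_res == U, the first two trips through
 * xfs_log_ticket_regrant() merely decrement t_cnt and hand back the unused
 * t_curr_res. On the third trip t_cnt reaches zero, so a full unit U is
 * re-added to the reserve grant head to pay for the next transaction in
 * the rolling chain.
 */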
| 3121 | /* |
| 3122 | * Give back the space left from a reservation. |
| 3123 | * |
| 3124 | * All the information we need to make a correct determination of space left |
| 3125 | * is present. For non-permanent reservations, things are quite easy. The |
| 3126 | * count should have been decremented to zero. We only need to deal with the |
| 3127 | * space remaining in the current reservation part of the ticket. If the |
| 3128 | * ticket contains a permanent reservation, there may be left over space which |
| 3129 | * needs to be released. A count of N means that N-1 refills of the current |
| 3130 | * reservation can be done before we need to ask for more space. The first |
| 3131 | * one goes to fill up the first current reservation. Once we run out of |
| 3132 | * space, the count will stay at zero and the only space remaining will be |
| 3133 | * in the current reservation field. |
| 3134 | */ |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3135 | void |
| 3136 | xfs_log_ticket_ungrant( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3137 | struct xlog *log, |
| 3138 | struct xlog_ticket *ticket) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3139 | { |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3140 | int bytes; |
| 3141 | |
| 3142 | trace_xfs_log_ticket_ungrant(log, ticket); |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3143 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3144 | if (ticket->t_cnt > 0) |
| 3145 | ticket->t_cnt--; |
| 3146 | |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3147 | trace_xfs_log_ticket_ungrant_sub(log, ticket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3148 | |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3149 | /* |
| 3150 | * If this is a permanent reservation ticket, we may be able to free |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3151 | * up more space based on the remaining count. |
| 3152 | */ |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3153 | bytes = ticket->t_curr_res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3154 | if (ticket->t_cnt > 0) { |
| 3155 | ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3156 | 		bytes += ticket->t_unit_res * ticket->t_cnt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3157 | } |
| 3158 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3159 | xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); |
| 3160 | xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3161 | |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3162 | trace_xfs_log_ticket_ungrant_exit(log, ticket); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3163 | |
Christoph Hellwig | cfb7cdc | 2012-02-20 02:31:23 +0000 | [diff] [blame] | 3164 | xfs_log_space_wake(log->l_mp); |
Christoph Hellwig | 8b41e3f | 2020-03-25 18:18:23 -0700 | [diff] [blame] | 3165 | xfs_log_ticket_put(ticket); |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 3166 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3167 | |
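/*
 * Editor's sketch, illustrative numbers only: a permanent ticket arriving
 * here with t_cnt == 2, t_unit_res == U and t_curr_res == R first drops
 * t_cnt to 1, then returns R + 1 * U bytes to both grant heads before
 * waking reservation waiters through xfs_log_space_wake().
 */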
| 3168 | /* |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 3169 | * This routine will mark the current iclog in the ring as WANT_SYNC and move |
| 3170 | * the current iclog pointer to the next iclog in the ring. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3171 | */ |
Dave Chinner | 0020a19 | 2021-08-10 18:00:44 -0700 | [diff] [blame] | 3172 | void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3173 | xlog_state_switch_iclogs( |
| 3174 | struct xlog *log, |
| 3175 | struct xlog_in_core *iclog, |
| 3176 | int eventual_size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3177 | { |
| 3178 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); |
Christoph Hellwig | 6936399 | 2020-03-20 08:49:21 -0700 | [diff] [blame] | 3179 | assert_spin_locked(&log->l_icloglock); |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 3180 | trace_xlog_iclog_switch(iclog, _RET_IP_); |
Christoph Hellwig | 6936399 | 2020-03-20 08:49:21 -0700 | [diff] [blame] | 3181 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3182 | if (!eventual_size) |
| 3183 | eventual_size = iclog->ic_offset; |
| 3184 | iclog->ic_state = XLOG_STATE_WANT_SYNC; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3185 | iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3186 | log->l_prev_block = log->l_curr_block; |
| 3187 | log->l_prev_cycle = log->l_curr_cycle; |
| 3188 | |
| 3189 | /* roll log?: ic_offset changed later */ |
 | 3190 | 	log->l_curr_block += BTOBB(eventual_size) + BTOBB(log->l_iclog_hsize); |
| 3191 | |
| 3192 | /* Round up to next log-sunit */ |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 3193 | if (log->l_iclog_roundoff > BBSIZE) { |
Geert Uytterhoeven | 18842e0 | 2021-06-18 08:24:04 -0700 | [diff] [blame] | 3194 | uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3195 | log->l_curr_block = roundup(log->l_curr_block, sunit_bb); |
| 3196 | } |
| 3197 | |
| 3198 | if (log->l_curr_block >= log->l_logBBsize) { |
Brian Foster | a45086e | 2015-10-12 15:59:25 +1100 | [diff] [blame] | 3199 | /* |
| 3200 | * Rewind the current block before the cycle is bumped to make |
| 3201 | * sure that the combined LSN never transiently moves forward |
| 3202 | * when the log wraps to the next cycle. This is to support the |
| 3203 | * unlocked sample of these fields from xlog_valid_lsn(). Most |
| 3204 | * other cases should acquire l_icloglock. |
| 3205 | */ |
| 3206 | log->l_curr_block -= log->l_logBBsize; |
| 3207 | ASSERT(log->l_curr_block >= 0); |
| 3208 | smp_wmb(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3209 | log->l_curr_cycle++; |
| 3210 | if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) |
| 3211 | log->l_curr_cycle++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3212 | } |
| 3213 | ASSERT(iclog == log->l_iclog); |
| 3214 | log->l_iclog = iclog->ic_next; |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 3215 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3216 | |
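/*
 * Editor's sketch of the wrap handling above: with l_logBBsize == 1000,
 * l_curr_block == 1005 and l_curr_cycle == 7, an unlocked xlog_valid_lsn()
 * sampler may transiently observe (cycle 7, block 5), which only moves the
 * combined LSN backwards, but can never observe (cycle 8, block 1005),
 * which would move it forwards past the real head.
 */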
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3217 | /* |
Dave Chinner | 8191d82 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3218 | * Force the iclog to disk and check if the iclog has been completed before |
| 3219 | * xlog_force_iclog() returns. This can happen on synchronous (e.g. |
| 3220 | * pmem) or fast async storage because we drop the icloglock to issue the IO. |
| 3221 | * If completion has already occurred, tell the caller so that it can avoid an |
| 3222 | * unnecessary wait on the iclog. |
| 3223 | */ |
| 3224 | static int |
| 3225 | xlog_force_and_check_iclog( |
| 3226 | struct xlog_in_core *iclog, |
| 3227 | bool *completed) |
| 3228 | { |
| 3229 | xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
| 3230 | int error; |
| 3231 | |
| 3232 | *completed = false; |
| 3233 | error = xlog_force_iclog(iclog); |
| 3234 | if (error) |
| 3235 | return error; |
| 3236 | |
| 3237 | /* |
| 3238 | * If the iclog has already been completed and reused the header LSN |
| 3239 | * will have been rewritten by completion |
| 3240 | */ |
| 3241 | if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) |
| 3242 | *completed = true; |
| 3243 | return 0; |
| 3244 | } |
| 3245 | |
| 3246 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3247 | * Write out all data in the in-core log as of this exact moment in time. |
| 3248 | * |
| 3249 | * Data may be written to the in-core log during this call. However, |
| 3250 | * we don't guarantee this data will be written out. A change from past |
| 3251 | * implementation means this routine will *not* write out zero length LRs. |
| 3252 | * |
| 3253 | * Basically, we try and perform an intelligent scan of the in-core logs. |
| 3254 | * If we determine there is no flushable data, we just return. There is no |
| 3255 | * flushable data if: |
| 3256 | * |
| 3257 | * 1. the current iclog is active and has no data; the previous iclog |
| 3258 | * is in the active or dirty state. |
 | 3259 |  *	2. the current iclog is dirty, and the previous iclog is in the |
| 3260 | * active or dirty state. |
| 3261 | * |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 3262 | * We may sleep if: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3263 | * |
| 3264 | * 1. the current iclog is not in the active nor dirty state. |
 | 3265 |  *	2. the current iclog is dirty, and the previous iclog is not in the |
| 3266 | * active nor dirty state. |
| 3267 | * 3. the current iclog is active, and there is another thread writing |
| 3268 | * to this particular iclog. |
| 3269 | * 4. a) the current iclog is active and has no other writers |
| 3270 | * b) when we return from flushing out this iclog, it is still |
| 3271 | * not in the active nor dirty state. |
| 3272 | */ |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3273 | int |
Christoph Hellwig | 60e5bb7 | 2018-03-13 23:15:28 -0700 | [diff] [blame] | 3274 | xfs_log_force( |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3275 | struct xfs_mount *mp, |
Christoph Hellwig | 60e5bb7 | 2018-03-13 23:15:28 -0700 | [diff] [blame] | 3276 | uint flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3277 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3278 | struct xlog *log = mp->m_log; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3279 | struct xlog_in_core *iclog; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3280 | |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3281 | XFS_STATS_INC(mp, xs_log_force); |
Christoph Hellwig | 60e5bb7 | 2018-03-13 23:15:28 -0700 | [diff] [blame] | 3282 | trace_xfs_log_force(mp, 0, _RET_IP_); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3283 | |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 3284 | xlog_cil_force(log); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 3285 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3286 | spin_lock(&log->l_icloglock); |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 3287 | if (xlog_is_shutdown(log)) |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3288 | goto out_error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3289 | |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 3290 | iclog = log->l_iclog; |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 3291 | trace_xlog_iclog_force(iclog, _RET_IP_); |
| 3292 | |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3293 | if (iclog->ic_state == XLOG_STATE_DIRTY || |
| 3294 | (iclog->ic_state == XLOG_STATE_ACTIVE && |
| 3295 | atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3296 | /* |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3297 | * If the head is dirty or (active and empty), then we need to |
| 3298 | * look at the previous iclog. |
| 3299 | * |
| 3300 | * If the previous iclog is active or dirty we are done. There |
| 3301 | * is nothing to sync out. Otherwise, we attach ourselves to the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3302 | * previous iclog and go to sleep. |
| 3303 | */ |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3304 | iclog = iclog->ic_prev; |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3305 | } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { |
| 3306 | if (atomic_read(&iclog->ic_refcnt) == 0) { |
Dave Chinner | 45eddb4 | 2021-07-27 16:23:48 -0700 | [diff] [blame] | 3307 | /* We have exclusive access to this iclog. */ |
Dave Chinner | 8191d82 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3308 | bool completed; |
| 3309 | |
| 3310 | if (xlog_force_and_check_iclog(iclog, &completed)) |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 3311 | goto out_error; |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3312 | |
Dave Chinner | 8191d82 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3313 | if (completed) |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3314 | goto out_unlock; |
| 3315 | } else { |
| 3316 | /* |
Dave Chinner | 2bf1ec0 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3317 | * Someone else is still writing to this iclog, so we |
| 3318 | * need to ensure that when they release the iclog it |
| 3319 | * gets synced immediately as we may be waiting on it. |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3320 | */ |
| 3321 | xlog_state_switch_iclogs(log, iclog, 0); |
| 3322 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3323 | } |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3324 | |
Dave Chinner | 2bf1ec0 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3325 | /* |
| 3326 | * The iclog we are about to wait on may contain the checkpoint pushed |
| 3327 | * by the above xlog_cil_force() call, but it may not have been pushed |
| 3328 | * to disk yet. Like the ACTIVE case above, we need to make sure caches |
| 3329 | * are flushed when this iclog is written. |
| 3330 | */ |
| 3331 | if (iclog->ic_state == XLOG_STATE_WANT_SYNC) |
| 3332 | iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; |
| 3333 | |
Christoph Hellwig | 81e5b50 | 2020-03-20 08:49:18 -0700 | [diff] [blame] | 3334 | if (flags & XFS_LOG_SYNC) |
| 3335 | return xlog_wait_on_iclog(iclog); |
Christoph Hellwig | e6b96570 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3336 | out_unlock: |
| 3337 | spin_unlock(&log->l_icloglock); |
| 3338 | return 0; |
| 3339 | out_error: |
| 3340 | spin_unlock(&log->l_icloglock); |
| 3341 | return -EIO; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3342 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3343 | |
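/*
 * Editor's sketch (hypothetical helper, not in the original file): how an
 * fsync-like path would drive the interface above. Only the
 * xfs_log_force() signature shown here is assumed.
 */
static int example_flush_whole_log(struct xfs_mount *mp)
{
	/* Wait until everything in the in-core log is on stable storage. */
	return xfs_log_force(mp, XFS_LOG_SYNC);
}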
Dave Chinner | 0020a19 | 2021-08-10 18:00:44 -0700 | [diff] [blame] | 3344 | /* |
| 3345 | * Force the log to a specific LSN. |
| 3346 | * |
| 3347 | * If an iclog with that lsn can be found: |
| 3348 | * If it is in the DIRTY state, just return. |
| 3349 | * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC |
| 3350 | * state and go to sleep or return. |
| 3351 | * If it is in any other state, go to sleep or return. |
| 3352 | * |
| 3353 | * Synchronous forces are implemented with a wait queue. All callers trying |
| 3354 | * to force a given lsn to disk must wait on the queue attached to the |
 | 3355 |  * specific in-core log. When the given in-core log finally completes its write |
| 3356 | * to disk, that thread will wake up all threads waiting on the queue. |
| 3357 | */ |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3358 | static int |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3359 | xlog_force_lsn( |
| 3360 | struct xlog *log, |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3361 | xfs_lsn_t lsn, |
| 3362 | uint flags, |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3363 | int *log_flushed, |
| 3364 | bool already_slept) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3365 | { |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3366 | struct xlog_in_core *iclog; |
Dave Chinner | 8191d82 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3367 | bool completed; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3368 | |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3369 | spin_lock(&log->l_icloglock); |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 3370 | if (xlog_is_shutdown(log)) |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3371 | goto out_error; |
| 3372 | |
Dave Chinner | 5112e206 | 2021-08-10 17:59:01 -0700 | [diff] [blame] | 3373 | iclog = log->l_iclog; |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3374 | while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { |
Dave Chinner | 956f6da | 2021-06-18 11:57:05 -0700 | [diff] [blame] | 3375 | trace_xlog_iclog_force_lsn(iclog, _RET_IP_); |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3376 | iclog = iclog->ic_next; |
| 3377 | if (iclog == log->l_iclog) |
| 3378 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3379 | } |
| 3380 | |
Dave Chinner | 2bf1ec0 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3381 | switch (iclog->ic_state) { |
| 3382 | case XLOG_STATE_ACTIVE: |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3383 | /* |
| 3384 | * We sleep here if we haven't already slept (e.g. this is the |
| 3385 | * first time we've looked at the correct iclog buf) and the |
| 3386 | * buffer before us is going to be sync'ed. The reason for this |
| 3387 | * is that if we are doing sync transactions here, by waiting |
| 3388 | * for the previous I/O to complete, we can allow a few more |
| 3389 | * transactions into this iclog before we close it down. |
| 3390 | * |
| 3391 | * Otherwise, we mark the buffer WANT_SYNC, and bump up the |
| 3392 | * refcnt so we can release the log (which drops the ref count). |
| 3393 | * The state switch keeps new transaction commits from using |
| 3394 | * this buffer. When the current commits finish writing into |
| 3395 | * the buffer, the refcount will drop to zero and the buffer |
| 3396 | * will go out then. |
| 3397 | */ |
| 3398 | if (!already_slept && |
Christoph Hellwig | 1858bb0 | 2019-10-14 10:36:43 -0700 | [diff] [blame] | 3399 | (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || |
| 3400 | iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3401 | xlog_wait(&iclog->ic_prev->ic_write_wait, |
| 3402 | &log->l_icloglock); |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3403 | return -EAGAIN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3404 | } |
Dave Chinner | 8191d82 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3405 | if (xlog_force_and_check_iclog(iclog, &completed)) |
Christoph Hellwig | df732b2 | 2019-10-14 10:36:41 -0700 | [diff] [blame] | 3406 | goto out_error; |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3407 | if (log_flushed) |
| 3408 | *log_flushed = 1; |
Dave Chinner | 8191d82 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3409 | if (completed) |
| 3410 | goto out_unlock; |
Dave Chinner | 2bf1ec0 | 2021-07-27 16:23:49 -0700 | [diff] [blame] | 3411 | break; |
| 3412 | case XLOG_STATE_WANT_SYNC: |
| 3413 | /* |
| 3414 | * This iclog may contain the checkpoint pushed by the |
| 3415 | * xlog_cil_force_seq() call, but there are other writers still |
| 3416 | * accessing it so it hasn't been pushed to disk yet. Like the |
| 3417 | * ACTIVE case above, we need to make sure caches are flushed |
| 3418 | * when this iclog is written. |
| 3419 | */ |
| 3420 | iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; |
| 3421 | break; |
| 3422 | default: |
| 3423 | /* |
| 3424 | * The entire checkpoint was written by the CIL force and is on |
| 3425 | * its way to disk already. It will be stable when it |
| 3426 | * completes, so we don't need to manipulate caches here at all. |
| 3427 | * We just need to wait for completion if necessary. |
| 3428 | */ |
| 3429 | break; |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3430 | } |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3431 | |
Christoph Hellwig | 81e5b50 | 2020-03-20 08:49:18 -0700 | [diff] [blame] | 3432 | if (flags & XFS_LOG_SYNC) |
| 3433 | return xlog_wait_on_iclog(iclog); |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3434 | out_unlock: |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3435 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3436 | return 0; |
Christoph Hellwig | 9380629 | 2018-03-13 23:15:29 -0700 | [diff] [blame] | 3437 | out_error: |
| 3438 | spin_unlock(&log->l_icloglock); |
| 3439 | return -EIO; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3440 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3441 | |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3442 | /* |
Dave Chinner | 0020a19 | 2021-08-10 18:00:44 -0700 | [diff] [blame] | 3443 | * Force the log to a specific checkpoint sequence. |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3444 | * |
Dave Chinner | 0020a19 | 2021-08-10 18:00:44 -0700 | [diff] [blame] | 3445 | * First force the CIL so that all the required changes have been flushed to the |
 | 3446 |  * iclogs. If the CIL force completed, it will return a commit LSN that indicates |
| 3447 | * the iclog that needs to be flushed to stable storage. If the caller needs |
| 3448 | * a synchronous log force, we will wait on the iclog with the LSN returned by |
| 3449 | * xlog_cil_force_seq() to be completed. |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3450 | */ |
| 3451 | int |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3452 | xfs_log_force_seq( |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3453 | struct xfs_mount *mp, |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3454 | xfs_csn_t seq, |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3455 | uint flags, |
| 3456 | int *log_flushed) |
| 3457 | { |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3458 | struct xlog *log = mp->m_log; |
| 3459 | xfs_lsn_t lsn; |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3460 | int ret; |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3461 | ASSERT(seq != 0); |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3462 | |
| 3463 | XFS_STATS_INC(mp, xs_log_force); |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3464 | trace_xfs_log_force(mp, seq, _RET_IP_); |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3465 | |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3466 | lsn = xlog_cil_force_seq(log, seq); |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3467 | if (lsn == NULLCOMMITLSN) |
| 3468 | return 0; |
| 3469 | |
Dave Chinner | 5f9b4b0 | 2021-06-18 08:21:52 -0700 | [diff] [blame] | 3470 | ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); |
| 3471 | if (ret == -EAGAIN) { |
| 3472 | XFS_STATS_INC(mp, xs_log_force_sleep); |
| 3473 | ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); |
| 3474 | } |
Christoph Hellwig | 3e4da46 | 2018-03-13 23:15:30 -0700 | [diff] [blame] | 3475 | return ret; |
| 3476 | } |
| 3477 | |
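/*
 * Editor's sketch: a synchronous transaction commit would wait on its
 * checkpoint with something like
 *
 *	error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
 *
 * where commit_seq is the CIL sequence handed out at commit time. The
 * -EAGAIN retry above lets the first attempt sleep on the previous iclog
 * so a few more commits can batch into the current one.
 */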
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3478 | /* |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 3479 | * Free a used ticket when its refcount falls to zero. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3480 | */ |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3481 | void |
| 3482 | xfs_log_ticket_put( |
| 3483 | xlog_ticket_t *ticket) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3484 | { |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3485 | ASSERT(atomic_read(&ticket->t_ref) > 0); |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 3486 | if (atomic_dec_and_test(&ticket->t_ref)) |
Carlos Maiolino | 377bcd5 | 2019-11-14 12:43:04 -0800 | [diff] [blame] | 3487 | kmem_cache_free(xfs_log_ticket_zone, ticket); |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3488 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3489 | |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3490 | xlog_ticket_t * |
| 3491 | xfs_log_ticket_get( |
| 3492 | xlog_ticket_t *ticket) |
| 3493 | { |
| 3494 | ASSERT(atomic_read(&ticket->t_ref) > 0); |
| 3495 | atomic_inc(&ticket->t_ref); |
| 3496 | return ticket; |
| 3497 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3498 | |
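/*
 * Editor's sketch (hypothetical helpers): the ticket refcount contract.
 * Code that stashes a ticket pointer beyond its caller's lifetime takes a
 * reference and drops it later; the ticket is freed once t_ref reaches
 * zero.
 */
static struct xlog_ticket *example_stash_ticket(struct xlog_ticket *ticket)
{
	return xfs_log_ticket_get(ticket);	/* t_ref: N -> N + 1 */
}

static void example_unstash_ticket(struct xlog_ticket *ticket)
{
	xfs_log_ticket_put(ticket);		/* frees at t_ref == 0 */
}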
| 3499 | /* |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3500 | * Figure out the total log space unit (in bytes) that would be |
| 3501 | * required for a log ticket. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3502 | */ |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 3503 | static int |
| 3504 | xlog_calc_unit_res( |
| 3505 | struct xlog *log, |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3506 | int unit_bytes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3507 | { |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3508 | int iclog_space; |
| 3509 | uint num_headers; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3510 | |
| 3511 | /* |
| 3512 | * Permanent reservations have up to 'cnt'-1 active log operations |
| 3513 | * in the log. A unit in this case is the amount of space for one |
| 3514 | * of these log operations. Normal reservations have a cnt of 1 |
| 3515 | * and their unit amount is the total amount of space required. |
| 3516 | * |
| 3517 | * The following lines of code account for non-transaction data |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3518 | * which occupy space in the on-disk log. |
| 3519 | * |
| 3520 | * Normal form of a transaction is: |
| 3521 | * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> |
| 3522 | * and then there are LR hdrs, split-recs and roundoff at end of syncs. |
| 3523 | * |
| 3524 | * We need to account for all the leadup data and trailer data |
| 3525 | * around the transaction data. |
| 3526 | * And then we need to account for the worst case in terms of using |
| 3527 | * more space. |
| 3528 | * The worst case will happen if: |
| 3529 | * - the placement of the transaction happens to be such that the |
| 3530 | * roundoff is at its maximum |
| 3531 | * - the transaction data is synced before the commit record is synced |
| 3532 | * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> |
| 3533 | * Therefore the commit record is in its own Log Record. |
| 3534 | * This can happen as the commit record is called with its |
| 3535 | * own region to xlog_write(). |
| 3536 | * This then means that in the worst case, roundoff can happen for |
| 3537 | * the commit-rec as well. |
| 3538 | * The commit-rec is smaller than padding in this scenario and so it is |
| 3539 | * not added separately. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3540 | */ |
| 3541 | |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3542 | /* for trans header */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3543 | unit_bytes += sizeof(xlog_op_header_t); |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3544 | unit_bytes += sizeof(xfs_trans_header_t); |
| 3545 | |
| 3546 | /* for start-rec */ |
| 3547 | unit_bytes += sizeof(xlog_op_header_t); |
| 3548 | |
Dave Chinner | 9b9fc2b7 | 2010-03-23 11:21:11 +1100 | [diff] [blame] | 3549 | /* |
| 3550 | * for LR headers - the space for data in an iclog is the size minus |
| 3551 | * the space used for the headers. If we use the iclog size, then we |
| 3552 | * underestimate the number of headers required. |
| 3553 | * |
| 3554 | * Furthermore - the addition of op headers for split-recs might |
| 3555 | * increase the space required enough to require more log and op |
| 3556 | * headers, so take that into account too. |
| 3557 | * |
| 3558 | * IMPORTANT: This reservation makes the assumption that if this |
| 3559 | * transaction is the first in an iclog and hence has the LR headers |
| 3560 | * accounted to it, then the remaining space in the iclog is |
| 3561 | * exclusively for this transaction. i.e. if the transaction is larger |
| 3562 | * than the iclog, it will be the only thing in that iclog. |
| 3563 | * Fundamentally, this means we must pass the entire log vector to |
| 3564 | * xlog_write to guarantee this. |
| 3565 | */ |
| 3566 | iclog_space = log->l_iclog_size - log->l_iclog_hsize; |
| 3567 | num_headers = howmany(unit_bytes, iclog_space); |
| 3568 | |
| 3569 | /* for split-recs - ophdrs added when data split over LRs */ |
| 3570 | unit_bytes += sizeof(xlog_op_header_t) * num_headers; |
| 3571 | |
| 3572 | /* add extra header reservations if we overrun */ |
| 3573 | while (!num_headers || |
| 3574 | howmany(unit_bytes, iclog_space) > num_headers) { |
| 3575 | unit_bytes += sizeof(xlog_op_header_t); |
| 3576 | num_headers++; |
| 3577 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3578 | unit_bytes += log->l_iclog_hsize * num_headers; |
| 3579 | |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3580 | /* for commit-rec LR header - note: padding will subsume the ophdr */ |
| 3581 | unit_bytes += log->l_iclog_hsize; |
| 3582 | |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 3583 | /* roundoff padding for transaction data and one for commit record */ |
| 3584 | unit_bytes += 2 * log->l_iclog_roundoff; |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3585 | |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3586 | return unit_bytes; |
| 3587 | } |
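| | /* |
| | * Worked example of the calculation above (illustrative numbers only, |
| | * assuming a 12 byte op header, a 16 byte transaction header, 32k |
| | * iclogs with a 512 byte header and l_iclog_roundoff = 512): |
| | * |
| | *	unit_bytes = 4096		payload |
| | *	+ 12 + 16 + 12			trans ophdr, trans hdr, start-rec |
| | *	num_headers = howmany(4136, 32256) = 1 |
| | *	+ 12				split-rec ophdr |
| | *	+ 512				one LR header |
| | *	+ 512				commit-rec LR header |
| | *	+ 2 * 512			roundoff |
| | *	= 6196 bytes per reservation unit |
| | */ |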
| 3588 | |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 3589 | int |
| 3590 | xfs_log_calc_unit_res( |
| 3591 | struct xfs_mount *mp, |
| 3592 | int unit_bytes) |
| 3593 | { |
| 3594 | return xlog_calc_unit_res(mp->m_log, unit_bytes); |
| 3595 | } |
| 3596 | |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3597 | /* |
| 3598 | * Allocate and initialise a new log ticket. |
| 3599 | */ |
| 3600 | struct xlog_ticket * |
| 3601 | xlog_ticket_alloc( |
| 3602 | struct xlog *log, |
| 3603 | int unit_bytes, |
| 3604 | int cnt, |
| 3605 | char client, |
Carlos Maiolino | ca4f258 | 2020-07-22 09:23:17 -0700 | [diff] [blame] | 3606 | bool permanent) |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3607 | { |
| 3608 | struct xlog_ticket *tic; |
| 3609 | int unit_res; |
| 3610 | |
Carlos Maiolino | ca4f258 | 2020-07-22 09:23:17 -0700 | [diff] [blame] | 3611 | tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL); |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3612 | |
Dave Chinner | a6a65fe | 2021-06-18 08:21:48 -0700 | [diff] [blame] | 3613 | unit_res = xlog_calc_unit_res(log, unit_bytes); |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3614 | |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3615 | atomic_set(&tic->t_ref, 1); |
Christoph Hellwig | 14a7235f | 2012-02-20 02:31:24 +0000 | [diff] [blame] | 3616 | tic->t_task = current; |
Dave Chinner | 1054794 | 2010-12-21 12:02:25 +1100 | [diff] [blame] | 3617 | INIT_LIST_HEAD(&tic->t_queue); |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3618 | tic->t_unit_res = unit_res; |
| 3619 | tic->t_curr_res = unit_res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3620 | tic->t_cnt = cnt; |
| 3621 | tic->t_ocnt = cnt; |
Akinobu Mita | ecb3403 | 2013-03-04 21:58:20 +0900 | [diff] [blame] | 3622 | tic->t_tid = prandom_u32(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3623 | tic->t_clientid = client; |
Christoph Hellwig | 9006fb9 | 2012-02-20 02:31:31 +0000 | [diff] [blame] | 3624 | if (permanent) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3625 | tic->t_flags |= XLOG_TIC_PERM_RESERV; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3626 | |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 3627 | xlog_tic_reset_res(tic); |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 3628 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3629 | return tic; |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3630 | } |
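| | /* |
| | * Usage sketch (illustrative; not a caller in this function's scope): |
| | * the reservation path allocates a permanent ticket for a rolling |
| | * transaction roughly as |
| | * |
| | *	tic = xlog_ticket_alloc(log, unit_bytes, cnt, XFS_TRANSACTION, |
| | *			true); |
| | * |
| | * and releases it with xfs_log_ticket_put() when the last transaction |
| | * using it commits or cancels. |
| | */ |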
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3631 | |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 3632 | #if defined(DEBUG) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3633 | /* |
| 3634 | * Make sure that the destination ptr is within the valid data region of |
| 3635 | * one of the iclogs. This uses backup pointers stored in a different |
| 3636 | * part of the log in case we trash the log structure. |
| 3637 | */ |
Christoph Hellwig | 181fdfe | 2017-11-06 11:54:02 -0800 | [diff] [blame] | 3638 | STATIC void |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3639 | xlog_verify_dest_ptr( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3640 | struct xlog *log, |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3641 | void *ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3642 | { |
| 3643 | int i; |
| 3644 | int good_ptr = 0; |
| 3645 | |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3646 | for (i = 0; i < log->l_iclog_bufs; i++) { |
| 3647 | if (ptr >= log->l_iclog_bak[i] && |
| 3648 | ptr <= log->l_iclog_bak[i] + log->l_iclog_size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3649 | good_ptr++; |
| 3650 | } |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3651 | |
| 3652 | if (!good_ptr) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3653 | xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3654 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3655 | |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3656 | /* |
| 3657 | * Check to make sure the grant write head didn't just overlap the tail. If |
| 3658 | * the cycles are the same, we can't be overlapping. Otherwise, make sure that |
| 3659 | * the cycles differ by exactly one and check the byte count. |
| 3660 | * |
| 3661 | * This check is run unlocked, so can give false positives. Rather than assert |
| 3662 | * on failures, use a warn-once flag and a panic tag to allow the admin to |
| 3663 | * determine if they want to panic the machine when such an error occurs. For |
| 3664 | * debug kernels this will have the same effect as using an assert but, unlike |
| 3665 | * an assert, it can be turned off at runtime. |
| 3666 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3667 | STATIC void |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3668 | xlog_verify_grant_tail( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3669 | struct xlog *log) |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3670 | { |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 3671 | int tail_cycle, tail_blocks; |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3672 | int cycle, space; |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3673 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3674 | xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 3675 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); |
| 3676 | if (tail_cycle != cycle) { |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3677 | if (cycle - 1 != tail_cycle && |
Dave Chinner | e1d06e5 | 2021-08-10 17:59:02 -0700 | [diff] [blame] | 3678 | !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3679 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, |
| 3680 | "%s: cycle - 1 != tail_cycle", __func__); |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3681 | } |
| 3682 | |
| 3683 | if (space > BBTOB(tail_blocks) && |
Dave Chinner | e1d06e5 | 2021-08-10 17:59:02 -0700 | [diff] [blame] | 3684 | !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3685 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, |
| 3686 | "%s: space > BBTOB(tail_blocks)", __func__); |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3687 | } |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3688 | } |
| 3689 | } |
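| | /* |
| | * Example of the check above (made-up values): if the write grant head |
| | * cracks to { cycle = 5, space = 2048 bytes } while the tail sits at |
| | * { cycle = 4, blocks = 3 }, the cycles differ by exactly one, so the |
| | * head has wrapped; space = 2048 > BBTOB(3) = 1536, so the head has |
| | * overlapped the tail and the second warning fires. |
| | */ |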
| 3690 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3691 | /* check if it will fit */ |
| 3692 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3693 | xlog_verify_tail_lsn( |
| 3694 | struct xlog *log, |
Dave Chinner | 9d11001 | 2021-07-28 17:14:11 -0700 | [diff] [blame] | 3695 | struct xlog_in_core *iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3696 | { |
Dave Chinner | 9d11001 | 2021-07-28 17:14:11 -0700 | [diff] [blame] | 3697 | xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); |
| 3698 | int blocks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3699 | |
| 3700 | if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { |
| 3701 | blocks = |
| 3702 | log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); |
| 3703 | if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize)) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3704 | xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3705 | } else { |
| 3706 | ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle); |
| 3707 | |
| 3708 | if (BLOCK_LSN(tail_lsn) == log->l_prev_block) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3709 | xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3710 | |
| 3711 | blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; |
| 3712 | if (blocks < BTOBB(iclog->ic_offset) + 1) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3713 | xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3714 | } |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 3715 | } |
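| | /* |
| | * Example of the same-cycle branch above (made-up values): with |
| | * l_logBBsize = 8192, l_prev_block = 8000 and the tail in the same |
| | * cycle at block 100, free space is 8192 - (8000 - 100) = 292 basic |
| | * blocks; an iclog whose data plus header needs more than that |
| | * triggers the "ran out of log space" report. |
| | */ |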
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3716 | |
| 3717 | /* |
| 3718 | * Perform a number of checks on the iclog before writing to disk. |
| 3719 | * |
| 3720 | * 1. Make sure the iclogs are still circular |
| 3721 | * 2. Make sure we have a good magic number |
| 3722 | * 3. Make sure we don't have magic numbers in the data |
| 3723 | * 4. Check fields of each log operation header for: |
| 3724 | * A. Valid client identifier |
| 3725 | * B. tid ptr value falls in valid ptr space (user space code) |
| 3726 | * C. Length in log record header is correct according to the |
| 3727 | * individual operation headers within record. |
| 3728 | * 5. When a bwrite will occur within 5 blocks of the front of the physical |
| 3729 | * log, check the preceding blocks of the physical log to make sure all |
| 3730 | * the cycle numbers agree with the current cycle number. |
| 3731 | */ |
| 3732 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3733 | xlog_verify_iclog( |
| 3734 | struct xlog *log, |
| 3735 | struct xlog_in_core *iclog, |
Christoph Hellwig | abca1f3 | 2019-06-28 19:27:24 -0700 | [diff] [blame] | 3736 | int count) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3737 | { |
| 3738 | xlog_op_header_t *ophead; |
| 3739 | xlog_in_core_t *icptr; |
| 3740 | xlog_in_core_2_t *xhdr; |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3741 | void *base_ptr, *ptr, *p; |
Christoph Hellwig | db9d67d | 2015-06-22 09:43:32 +1000 | [diff] [blame] | 3742 | ptrdiff_t field_offset; |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 3743 | uint8_t clientid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3744 | int len, i, j, k, op_len; |
| 3745 | int idx; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3746 | |
| 3747 | /* check validity of iclog pointers */ |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3748 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3749 | icptr = log->l_iclog; |
Geyslan G. Bem | 643f7c4 | 2013-10-30 16:01:00 -0500 | [diff] [blame] | 3750 | for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) |
| 3751 | ASSERT(icptr); |
| 3752 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3753 | if (icptr != log->l_iclog) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3754 | xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3755 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3756 | |
| 3757 | /* check log magic numbers */ |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 3758 | if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3759 | xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3760 | |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3761 | base_ptr = ptr = &iclog->ic_header; |
| 3762 | p = &iclog->ic_header; |
| 3763 | for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) { |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 3764 | if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3765 | xfs_emerg(log->l_mp, "%s: unexpected magic num", |
| 3766 | __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3767 | } |
| 3768 | |
| 3769 | /* check fields */ |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3770 | len = be32_to_cpu(iclog->ic_header.h_num_logops); |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3771 | base_ptr = ptr = iclog->ic_datap; |
| 3772 | ophead = ptr; |
Christoph Hellwig | b28708d | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 3773 | xhdr = iclog->ic_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3774 | for (i = 0; i < len; i++) { |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3775 | ophead = ptr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3776 | |
| 3777 | /* clientid is only 1 byte */ |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3778 | p = &ophead->oh_clientid; |
| 3779 | field_offset = p - base_ptr; |
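| | /* |
| | * The first four bytes of every 512 byte log sector are |
| | * overwritten with the record's cycle number before the write |
| | * and stashed in the header cycle data, so a field that starts |
| | * on a sector boundary must be read from that stashed copy. |
| | */ |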
Christoph Hellwig | abca1f3 | 2019-06-28 19:27:24 -0700 | [diff] [blame] | 3780 | if (field_offset & 0x1ff) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3781 | clientid = ophead->oh_clientid; |
| 3782 | } else { |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3783 | idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3784 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
| 3785 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 3786 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 3787 | clientid = xlog_get_client_id( |
| 3788 | xhdr[j].hic_xheader.xh_cycle_data[k]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3789 | } else { |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 3790 | clientid = xlog_get_client_id( |
| 3791 | iclog->ic_header.h_cycle_data[idx]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3792 | } |
| 3793 | } |
| 3794 | if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3795 | xfs_warn(log->l_mp, |
Darrick J. Wong | c969004 | 2018-01-09 12:02:55 -0800 | [diff] [blame] | 3796 | "%s: invalid clientid %d op "PTR_FMT" offset 0x%lx", |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3797 | __func__, clientid, ophead, |
| 3798 | (unsigned long)field_offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3799 | |
| 3800 | /* check length */ |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3801 | p = &ophead->oh_len; |
| 3802 | field_offset = p - base_ptr; |
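| | /* as above: a length on a sector boundary must come from the |
| | * stashed cycle data rather than the in-place bytes |
| | */ |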
Christoph Hellwig | abca1f3 | 2019-06-28 19:27:24 -0700 | [diff] [blame] | 3803 | if (field_offset & 0x1ff) { |
Christoph Hellwig | 67fcb7b | 2007-10-12 10:58:59 +1000 | [diff] [blame] | 3804 | op_len = be32_to_cpu(ophead->oh_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3805 | } else { |
Christoph Hellwig | db9d67d | 2015-06-22 09:43:32 +1000 | [diff] [blame] | 3806 | idx = BTOBBT((uintptr_t)&ophead->oh_len - |
| 3807 | (uintptr_t)iclog->ic_datap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3808 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
| 3809 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 3810 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3811 | op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3812 | } else { |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3813 | op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3814 | } |
| 3815 | } |
| 3816 | ptr += sizeof(xlog_op_header_t) + op_len; |
| 3817 | } |
Dave Chinner | b843299 | 2020-03-25 18:18:24 -0700 | [diff] [blame] | 3818 | } |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 3819 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3820 | |
| 3821 | /* |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3822 | * Perform a forced shutdown on the log. This should be called once and once |
| 3823 | * only by the high level filesystem shutdown code to shut the log subsystem |
| 3824 | * down cleanly. |
| 3825 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3826 | * Our main objectives here are to make sure that: |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3827 | * a. if the shutdown was not due to a log IO error, flush the logs to |
| 3828 | * disk. Anything modified after this is ignored. |
| 3829 | * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested |
| 3830 | * parties to find out. Nothing new gets queued after this is done. |
| 3831 | * c. Tasks sleeping on log reservations, pinned objects and |
| 3832 | * other resources get woken up. |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3833 | * |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3834 | * Return true if the shutdown cause was a log IO error and we actually shut the |
| 3835 | * log down. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3836 | */ |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3837 | bool |
| 3838 | xlog_force_shutdown( |
| 3839 | struct xlog *log, |
| 3840 | int shutdown_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3841 | { |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3842 | bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3843 | |
| 3844 | /* |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3845 | * If this happens during log recovery then we aren't using the runtime |
| 3846 | * log mechanisms yet, so there's nothing to shut down. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3847 | */ |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3848 | if (!log || xlog_in_recovery(log)) |
| 3849 | return false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3850 | |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3851 | ASSERT(!xlog_is_shutdown(log)); |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3852 | |
| 3853 | /* |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3854 | * Flush all the completed transactions to disk before marking the log |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3855 | * as shut down. We need to do this first as shutting down the log |
| 3856 | * before the force will prevent the log force from flushing the iclogs |
| 3857 | * to disk. |
| 3858 | * |
| 3859 | * Re-entry due to a log IO error shutdown during the log force is |
| 3860 | * prevented by the atomicity of higher level shutdown code. |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3861 | */ |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3862 | if (!log_error) |
| 3863 | xfs_log_force(log->l_mp, XFS_LOG_SYNC); |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3864 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3865 | /* |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3866 | * Atomically set the shutdown state. If the shutdown state is already |
| 3867 | * set, then someone else is performing the shutdown and we are done |
| 3868 | * here. This should never happen because we should only ever get called |
| 3869 | * once by the first shutdown caller. |
| 3870 | * |
| 3871 | * Many of the log state machine transitions assume that shutdown state |
| 3872 | * cannot change once they hold the log->l_icloglock. Hence we need to |
| 3873 | * hold that lock here, even though we use the atomic test_and_set_bit() |
| 3874 | * operation to set the shutdown state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3875 | */ |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3876 | spin_lock(&log->l_icloglock); |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3877 | if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) { |
| 3878 | spin_unlock(&log->l_icloglock); |
| 3879 | ASSERT(0); |
| 3880 | return false; |
| 3881 | } |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3882 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3883 | |
| 3884 | /* |
Dave Chinner | 1054794 | 2010-12-21 12:02:25 +1100 | [diff] [blame] | 3885 | * We don't want anybody waiting for log reservations after this. That |
| 3886 | * means we have to wake up everybody queued up on reserveq as well as |
| 3887 | * writeq. In addition, we make sure in xlog_{re}grant_log_space that |
| 3888 | * we don't enqueue anything once the SHUTDOWN flag is set, and this |
Dave Chinner | 3f16b98 | 2010-12-21 12:29:01 +1100 | [diff] [blame] | 3889 | * action is protected by the grant locks. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3890 | */ |
Christoph Hellwig | a79bf2d | 2012-02-20 02:31:27 +0000 | [diff] [blame] | 3891 | xlog_grant_head_wake_all(&log->l_reserve_head); |
| 3892 | xlog_grant_head_wake_all(&log->l_write_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3893 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3894 | /* |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 3895 | * Wake up everybody waiting on xfs_log_force. Wake the CIL push first |
| 3896 | * as if the log writes were completed. The abort handling in the log |
| 3897 | * item committed callback functions will do this again under lock to |
| 3898 | * avoid races. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3899 | */ |
Rik van Riel | cdea545 | 2019-09-05 17:32:48 -0700 | [diff] [blame] | 3900 | spin_lock(&log->l_cilp->xc_push_lock); |
Dave Chinner | 68a74dc | 2021-08-10 18:00:44 -0700 | [diff] [blame] | 3901 | wake_up_all(&log->l_cilp->xc_start_wait); |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 3902 | wake_up_all(&log->l_cilp->xc_commit_wait); |
Rik van Riel | cdea545 | 2019-09-05 17:32:48 -0700 | [diff] [blame] | 3903 | spin_unlock(&log->l_cilp->xc_push_lock); |
Dave Chinner | aad7272 | 2021-08-10 18:00:40 -0700 | [diff] [blame] | 3904 | xlog_state_shutdown_callbacks(log); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3905 | |
Dave Chinner | b36d465 | 2021-08-10 18:00:39 -0700 | [diff] [blame] | 3906 | return log_error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3907 | } |
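| | /* |
| | * Call-chain sketch (for illustration): the high level shutdown path, |
| | * e.g. xfs_do_force_shutdown(), chooses the shutdown flags and calls |
| | * |
| | *	xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR); |
| | * |
| | * exactly once; a repeat call trips the ASSERT above. |
| | */ |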
| 3908 | |
Christoph Hellwig | ba0f32d | 2005-06-21 15:36:52 +1000 | [diff] [blame] | 3909 | STATIC int |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3910 | xlog_iclogs_empty( |
| 3911 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3912 | { |
| 3913 | xlog_in_core_t *iclog; |
| 3914 | |
| 3915 | iclog = log->l_iclog; |
| 3916 | do { |
| 3917 | /* |
| 3918 | * Endianness does not matter here; zero is zero in any language. |
| 3919 | */ |
| 3920 | if (iclog->ic_header.h_num_logops) |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3921 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3922 | iclog = iclog->ic_next; |
| 3923 | } while (iclog != log->l_iclog); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3924 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3925 | } |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 3926 | |
Brian Foster | a45086e | 2015-10-12 15:59:25 +1100 | [diff] [blame] | 3927 | /* |
| 3928 | * Verify that an LSN stamped into a piece of metadata is valid. This is |
| 3929 | * intended for use in read verifiers on v5 superblocks. |
| 3930 | */ |
| 3931 | bool |
| 3932 | xfs_log_check_lsn( |
| 3933 | struct xfs_mount *mp, |
| 3934 | xfs_lsn_t lsn) |
| 3935 | { |
| 3936 | struct xlog *log = mp->m_log; |
| 3937 | bool valid; |
| 3938 | |
| 3939 | /* |
| 3940 | * norecovery mode skips mount-time log processing and unconditionally |
| 3941 | * resets the in-core LSN. We can't validate in this mode, but |
| 3942 | * modifications are not allowed anyway, so just return true. |
| 3943 | */ |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame^] | 3944 | if (xfs_has_norecovery(mp)) |
Brian Foster | a45086e | 2015-10-12 15:59:25 +1100 | [diff] [blame] | 3945 | return true; |
| 3946 | |
| 3947 | /* |
| 3948 | * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is |
| 3949 | * handled by recovery and thus safe to ignore here. |
| 3950 | */ |
| 3951 | if (lsn == NULLCOMMITLSN) |
| 3952 | return true; |
| 3953 | |
| 3954 | valid = xlog_valid_lsn(mp->m_log, lsn); |
| 3955 | |
| 3956 | /* warn the user about what's gone wrong before verifier failure */ |
| 3957 | if (!valid) { |
| 3958 | spin_lock(&log->l_icloglock); |
| 3959 | xfs_warn(mp, |
| 3960 | "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). " |
| 3961 | "Please unmount and run xfs_repair (>= v4.3) to resolve.", |
| 3962 | CYCLE_LSN(lsn), BLOCK_LSN(lsn), |
| 3963 | log->l_curr_cycle, log->l_curr_block); |
| 3964 | spin_unlock(&log->l_icloglock); |
| 3965 | } |
| 3966 | |
| 3967 | return valid; |
| 3968 | } |
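| | /* |
| | * Typical use (a sketch; the AGF field is just an example): a v5 read |
| | * verifier rejects buffers stamped ahead of the current log, e.g. |
| | * |
| | *	if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn))) |
| | *		return __this_address; |
| | * |
| | * so metadata written under a log we no longer have is not trusted. |
| | */ |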
Darrick J. Wong | 0c60d3a | 2018-08-01 07:40:48 -0700 | [diff] [blame] | 3969 | |
Darrick J. Wong | 2b73a2c | 2021-08-08 08:27:12 -0700 | [diff] [blame] | 3970 | /* |
| 3971 | * Notify the log that we're about to start using a feature that is protected |
| 3972 | * by a log incompat feature flag. This will prevent log covering from |
| 3973 | * clearing those flags. |
| 3974 | */ |
| 3975 | void |
| 3976 | xlog_use_incompat_feat( |
| 3977 | struct xlog *log) |
| 3978 | { |
| 3979 | down_read(&log->l_incompat_users); |
| 3980 | } |
| 3981 | |
| 3982 | /* Notify the log that we've finished using log incompat features. */ |
| 3983 | void |
| 3984 | xlog_drop_incompat_feat( |
| 3985 | struct xlog *log) |
| 3986 | { |
| 3987 | up_read(&log->l_incompat_users); |
| 3988 | } |
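| | /* |
| | * Expected pairing (a sketch of the protocol, assuming log covering |
| | * takes l_incompat_users for write before clearing the on-disk flags): |
| | * |
| | *	xlog_use_incompat_feat(log); |
| | *	...log items that rely on the incompat feature... |
| | *	xlog_drop_incompat_feat(log); |
| | * |
| | * so an incompat flag cannot be cleared while still in use. |
| | */ |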