blob: a8289adc1b29ec13739016e71d1d7ad1375a0e12 [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Tim Shimmin87c199c2006-06-09 14:56:16 +10003 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
Nathan Scott7b718762005-11-02 14:58:39 +11004 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006#include "xfs.h"
Nathan Scotta844f452005-11-02 14:38:42 +11007#include "xfs_fs.h"
Dave Chinner70a98832013-10-23 10:36:05 +11008#include "xfs_shared.h"
Dave Chinner239880e2013-10-23 10:50:10 +11009#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
Nathan Scotta844f452005-11-02 14:38:42 +110012#include "xfs_bit.h"
Nathan Scotta844f452005-11-02 14:38:42 +110013#include "xfs_sb.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include "xfs_mount.h"
Darrick J. Wong50995582017-11-21 20:53:02 -080015#include "xfs_defer.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include "xfs_inode.h"
Dave Chinner239880e2013-10-23 10:50:10 +110017#include "xfs_trans.h"
Dave Chinner239880e2013-10-23 10:50:10 +110018#include "xfs_log.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include "xfs_log_priv.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include "xfs_log_recover.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include "xfs_trans_priv.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110022#include "xfs_alloc.h"
23#include "xfs_ialloc.h"
Christoph Hellwig0b1b2132009-12-14 23:14:59 +000024#include "xfs_trace.h"
Dave Chinner33479e02012-10-08 21:56:11 +110025#include "xfs_icache.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110026#include "xfs_error.h"
Brian Foster60a4a222016-09-26 08:34:27 +100027#include "xfs_buf_item.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
/*
 * Midpoint of two block numbers, used by the binary searches below.
 * Arguments are fully parenthesized so that argument expressions with
 * lower precedence than '+' (e.g. ternaries) expand correctly.
 */
#define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)

Mark Tinguely9a8d2fd2012-06-14 09:22:16 -050031STATIC int
32xlog_find_zeroed(
33 struct xlog *,
34 xfs_daddr_t *);
35STATIC int
36xlog_clear_stale_blocks(
37 struct xlog *,
38 xfs_lsn_t);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#if defined(DEBUG)
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -050040STATIC void
41xlog_recover_check_summary(
42 struct xlog *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#else
44#define xlog_recover_check_summary(log)
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#endif
Brian Foster7088c412016-01-05 07:40:16 +110046STATIC int
47xlog_do_recovery_pass(
48 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
Linus Torvalds1da177e2005-04-16 15:20:36 -070050/*
51 * Sector aligned buffer routines for buffer create/read/write/access
52 */
53
Alex Elderff30a622010-04-13 15:22:58 +100054/*
Brian Foster99c26592017-10-26 09:31:15 -070055 * Verify the log-relative block number and length in basic blocks are valid for
56 * an operation involving the given XFS log buffer. Returns true if the fields
57 * are valid, false otherwise.
Alex Elderff30a622010-04-13 15:22:58 +100058 */
Brian Foster99c26592017-10-26 09:31:15 -070059static inline bool
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -070060xlog_verify_bno(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -050061 struct xlog *log,
Brian Foster99c26592017-10-26 09:31:15 -070062 xfs_daddr_t blk_no,
Alex Elderff30a622010-04-13 15:22:58 +100063 int bbcount)
64{
Brian Foster99c26592017-10-26 09:31:15 -070065 if (blk_no < 0 || blk_no >= log->l_logBBsize)
66 return false;
67 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
68 return false;
69 return true;
Alex Elderff30a622010-04-13 15:22:58 +100070}
71
/*
 * Allocate a buffer to hold log data. The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 *
 * Returns the buffer, or NULL if nbblks is invalid or the (KM_MAYFAIL)
 * allocation fails; callers must handle a NULL return.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer. If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue. Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1). But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	/* zeroed, DMA-aligned allocation; may fail (KM_MAYFAIL) */
	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
112
Alex Elder48389ef2010-04-20 17:10:21 +1000113/*
114 * Return the address of the start of the given block number's data
115 * in a log buffer. The buffer covers a log sector-aligned region.
116 */
Christoph Hellwig18ffb8c2019-06-28 19:27:26 -0700117static inline unsigned int
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100118xlog_align(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500119 struct xlog *log,
Christoph Hellwig18ffb8c2019-06-28 19:27:26 -0700120 xfs_daddr_t blk_no)
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100121{
Christoph Hellwig18ffb8c2019-06-28 19:27:26 -0700122 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100123}
124
/*
 * Perform a synchronous read or write of log basic blocks. The range is
 * validated against the log, then expanded to whole log sectors before
 * being issued to the log device.
 *
 * Returns 0 on success, -EFSCORRUPTED for an invalid range, or the error
 * from the block device. I/O failures are logged unless the filesystem is
 * already shut down.
 */
static int
xlog_do_io(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	unsigned int	nbblks,
	char		*data,
	unsigned int	op)
{
	int		error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
		     "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	/* expand the range to sector-aligned boundaries */
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	/* l_logBBstart translates log-relative blocks to device addresses */
	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_alert(log->l_mp,
		"log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}
156
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100157STATIC int
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700158xlog_bread_noalign(
159 struct xlog *log,
160 xfs_daddr_t blk_no,
161 int nbblks,
162 char *data)
163{
164 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
165}
166
167STATIC int
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100168xlog_bread(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500169 struct xlog *log,
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100170 xfs_daddr_t blk_no,
171 int nbblks,
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700172 char *data,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +1000173 char **offset)
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100174{
175 int error;
176
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700177 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
178 if (!error)
179 *offset = data + xlog_align(log, blk_no);
180 return error;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100181}
182
Christoph Hellwigba0f32d2005-06-21 15:36:52 +1000183STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184xlog_bwrite(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500185 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 xfs_daddr_t blk_no,
187 int nbblks,
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700188 char *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189{
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700190 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191}
192
#ifdef DEBUG
/*
 * Dump debug information for a log record header: the superblock's uuid
 * and log format versus the header's. Used when a header check fails.
 * Compiles away to nothing on non-DEBUG builds.
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
210
/*
 * Check a log record header found during recovery of a dirty log: the
 * format must match what this kernel writes and the uuid must match the
 * superblock's. The caller has already verified the magic number.
 *
 * Returns 0 if the header is usable, -EFSCORRUPTED otherwise.
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}
241
/*
 * Check that a log record header belongs to this filesystem: its uuid
 * must match the superblock's. A null uuid is tolerated and assumed to
 * mean the log was last written by IRIX (which does not fill in
 * h_fs_uuid).
 *
 * Returns 0 if the header is acceptable, -EFSCORRUPTED on uuid mismatch.
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}
267
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268/*
269 * This routine finds (to an approximation) the first block in the physical
270 * log which contains the given cycle. It uses a binary search algorithm.
271 * Note that the algorithm can not be perfect because the disk will not
272 * necessarily be perfect.
273 */
David Chinnera8272ce2007-11-23 16:28:09 +1100274STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275xlog_find_cycle_start(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500276 struct xlog *log,
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700277 char *buffer,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 xfs_daddr_t first_blk,
279 xfs_daddr_t *last_blk,
280 uint cycle)
281{
Christoph Hellwigb2a922c2015-06-22 09:45:10 +1000282 char *offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 xfs_daddr_t mid_blk;
Alex Eldere3bb2e32010-04-15 18:17:30 +0000284 xfs_daddr_t end_blk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285 uint mid_cycle;
286 int error;
287
Alex Eldere3bb2e32010-04-15 18:17:30 +0000288 end_blk = *last_blk;
289 mid_blk = BLK_AVG(first_blk, end_blk);
290 while (mid_blk != first_blk && mid_blk != end_blk) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700291 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100292 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 return error;
Christoph Hellwig03bea6f2007-10-12 10:58:05 +1000294 mid_cycle = xlog_get_cycle(offset);
Alex Eldere3bb2e32010-04-15 18:17:30 +0000295 if (mid_cycle == cycle)
296 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
297 else
298 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
299 mid_blk = BLK_AVG(first_blk, end_blk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 }
Alex Eldere3bb2e32010-04-15 18:17:30 +0000301 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
302 (mid_blk == end_blk && mid_blk-1 == first_blk));
303
304 *last_blk = end_blk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305
306 return 0;
307}
308
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range. The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 *
 * Returns 0 on success (whether or not a matching block was found),
 * -ENOMEM if no usable buffer could be allocated, or the error from a
 * failed log read.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining. If that fails,
	 * try a smaller size. We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	/* walk the range a buffer-load at a time */
	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		/* the final chunk may be shorter than bufblks */
		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	/* no block in the range carries the stop cycle */
	*new_blk = -1;

out:
	kmem_free(buffer);
	return error;
}
373
Gao Xiang0c771b92020-09-22 09:41:06 -0700374static inline int
375xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
376{
377 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
378 int h_size = be32_to_cpu(rh->h_size);
379
380 if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
381 h_size > XLOG_HEADER_CYCLE_SIZE)
382 return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
383 }
384 return 1;
385}
386
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record. Therefore, we subtract one to get the block number
 * of the last block in the given buffer. extra_bblks contains the number
 * of blocks we would have read on a previous read. This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 *
 * Returns 0 on success (*last_blk may have been moved back to the record
 * header), 1 if no record header was found down to the start of the
 * physical log, or a negative errno on failure.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	/*
	 * Try to read the whole range in one go; fall back to a one-block
	 * buffer (and per-block reads in the loop below) if memory is tight.
	 */
	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		/* point offset at the last block of the range */
		offset += ((num_blks - 1) << BBSHIFT);
	}

	/* scan backwards from *last_blk - 1 for a log record header */
	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			/* one-block buffer: must read each block as we go */
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header. Return
	 * to caller. If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head. So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #. We may end
	 * up reading an entire log record. In this case, we don't want to
	 * reset last_blk. Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kmem_free(buffer);
	return error;
}
489
490/*
491 * Head is defined to be the point of the log where the next log write
Zhi Yong Wu0a94da22013-08-07 10:11:08 +0000492 * could go. This means that incomplete LR writes at the end are
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 * eliminated when calculating the head. We aren't guaranteed that previous
494 * LR have complete transactions. We only know that a cycle number of
495 * current cycle number -1 won't be present in the log if we start writing
496 * from our current block number.
497 *
498 * last_blk contains the block number of the first block with a given
499 * cycle number.
500 *
501 * Return: zero if normal, non-zero if error.
502 */
Christoph Hellwigba0f32d2005-06-21 15:36:52 +1000503STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504xlog_find_head(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500505 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 xfs_daddr_t *return_head_blk)
507{
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700508 char *buffer;
Christoph Hellwigb2a922c2015-06-22 09:45:10 +1000509 char *offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
511 int num_scan_bblks;
512 uint first_half_cycle, last_half_cycle;
513 uint stop_on_cycle;
514 int error, log_bbnum = log->l_logBBsize;
515
516 /* Is the end of the log device zeroed? */
Dave Chinner24513372014-06-25 14:58:08 +1000517 error = xlog_find_zeroed(log, &first_blk);
518 if (error < 0) {
519 xfs_warn(log->l_mp, "empty log check failed");
520 return error;
521 }
522 if (error == 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523 *return_head_blk = first_blk;
524
525 /* Is the whole lot zeroed? */
526 if (!first_blk) {
527 /* Linux XFS shouldn't generate totally zeroed logs -
528 * mkfs etc write a dummy unmount record to a fresh
529 * log so we can store the uuid in there
530 */
Dave Chinnera0fa2b62011-03-07 10:01:35 +1100531 xfs_warn(log->l_mp, "totally zeroed log");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 }
533
534 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 }
536
537 first_blk = 0; /* get cycle # of 1st block */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700538 buffer = xlog_alloc_buffer(log, 1);
539 if (!buffer)
Dave Chinner24513372014-06-25 14:58:08 +1000540 return -ENOMEM;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100541
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700542 error = xlog_bread(log, 0, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100543 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700544 goto out_free_buffer;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100545
Christoph Hellwig03bea6f2007-10-12 10:58:05 +1000546 first_half_cycle = xlog_get_cycle(offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547
548 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700549 error = xlog_bread(log, last_blk, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100550 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700551 goto out_free_buffer;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100552
Christoph Hellwig03bea6f2007-10-12 10:58:05 +1000553 last_half_cycle = xlog_get_cycle(offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554 ASSERT(last_half_cycle != 0);
555
556 /*
557 * If the 1st half cycle number is equal to the last half cycle number,
558 * then the entire log is stamped with the same cycle number. In this
559 * case, head_blk can't be set to zero (which makes sense). The below
560 * math doesn't work out properly with head_blk equal to zero. Instead,
561 * we set it to log_bbnum which is an invalid block number, but this
562 * value makes the math correct. If head_blk doesn't changed through
563 * all the tests below, *head_blk is set to zero at the very end rather
564 * than log_bbnum. In a sense, log_bbnum and zero are the same block
565 * in a circular file.
566 */
567 if (first_half_cycle == last_half_cycle) {
568 /*
569 * In this case we believe that the entire log should have
570 * cycle number last_half_cycle. We need to scan backwards
571 * from the end verifying that there are no holes still
572 * containing last_half_cycle - 1. If we find such a hole,
573 * then the start of that hole will be the new head. The
574 * simple case looks like
575 * x | x ... | x - 1 | x
576 * Another case that fits this picture would be
577 * x | x + 1 | x ... | x
Nathan Scottc41564b2006-03-29 08:55:14 +1000578 * In this case the head really is somewhere at the end of the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 * log, as one of the latest writes at the beginning was
580 * incomplete.
581 * One more case is
582 * x | x + 1 | x ... | x - 1 | x
583 * This is really the combination of the above two cases, and
584 * the head has to end up at the start of the x-1 hole at the
585 * end of the log.
586 *
587 * In the 256k log case, we will read from the beginning to the
588 * end of the log and search for cycle numbers equal to x-1.
589 * We don't worry about the x+1 blocks that we encounter,
590 * because we know that they cannot be the head since the log
591 * started with x.
592 */
593 head_blk = log_bbnum;
594 stop_on_cycle = last_half_cycle - 1;
595 } else {
596 /*
597 * In this case we want to find the first block with cycle
598 * number matching last_half_cycle. We expect the log to be
599 * some variation on
Alex Elder3f943d82010-04-15 18:17:34 +0000600 * x + 1 ... | x ... | x
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601 * The first block with cycle number x (last_half_cycle) will
602 * be where the new head belongs. First we do a binary search
603 * for the first occurrence of last_half_cycle. The binary
604 * search may not be totally accurate, so then we scan back
605 * from there looking for occurrences of last_half_cycle before
606 * us. If that backwards scan wraps around the beginning of
607 * the log, then we look for occurrences of last_half_cycle - 1
608 * at the end of the log. The cases we're looking for look
609 * like
Alex Elder3f943d82010-04-15 18:17:34 +0000610 * v binary search stopped here
611 * x + 1 ... | x | x + 1 | x ... | x
612 * ^ but we want to locate this spot
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 * or
Linus Torvalds1da177e2005-04-16 15:20:36 -0700614 * <---------> less than scan distance
Alex Elder3f943d82010-04-15 18:17:34 +0000615 * x + 1 ... | x ... | x - 1 | x
616 * ^ we want to locate this spot
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 */
618 stop_on_cycle = last_half_cycle;
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700619 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
620 last_half_cycle);
621 if (error)
622 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 }
624
625 /*
626 * Now validate the answer. Scan back some number of maximum possible
627 * blocks and make sure each one has the expected cycle number. The
628 * maximum is determined by the total possible amount of buffering
629 * in the in-core log. The following number can be made tighter if
630 * we actually look at the block size of the filesystem.
631 */
Brian Foster9f2a4502017-10-26 09:31:16 -0700632 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633 if (head_blk >= num_scan_bblks) {
634 /*
635 * We are guaranteed that the entire check can be performed
636 * in one buffer.
637 */
638 start_blk = head_blk - num_scan_bblks;
639 if ((error = xlog_find_verify_cycle(log,
640 start_blk, num_scan_bblks,
641 stop_on_cycle, &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700642 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 if (new_blk != -1)
644 head_blk = new_blk;
645 } else { /* need to read 2 parts of log */
646 /*
647 * We are going to scan backwards in the log in two parts.
648 * First we scan the physical end of the log. In this part
649 * of the log, we are looking for blocks with cycle number
650 * last_half_cycle - 1.
651 * If we find one, then we know that the log starts there, as
652 * we've found a hole that didn't get written in going around
653 * the end of the physical log. The simple case for this is
654 * x + 1 ... | x ... | x - 1 | x
655 * <---------> less than scan distance
656 * If all of the blocks at the end of the log have cycle number
657 * last_half_cycle, then we check the blocks at the start of
658 * the log looking for occurrences of last_half_cycle. If we
659 * find one, then our current estimate for the location of the
660 * first occurrence of last_half_cycle is wrong and we move
661 * back to the hole we've found. This case looks like
662 * x + 1 ... | x | x + 1 | x ...
663 * ^ binary search stopped here
664 * Another case we need to handle that only occurs in 256k
665 * logs is
666 * x + 1 ... | x ... | x+1 | x ...
667 * ^ binary search stops here
668 * In a 256k log, the scan at the end of the log will see the
669 * x + 1 blocks. We need to skip past those since that is
670 * certainly not the head of the log. By searching for
671 * last_half_cycle-1 we accomplish that.
672 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 ASSERT(head_blk <= INT_MAX &&
Alex Elder3f943d82010-04-15 18:17:34 +0000674 (xfs_daddr_t) num_scan_bblks >= head_blk);
675 start_blk = log_bbnum - (num_scan_bblks - head_blk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676 if ((error = xlog_find_verify_cycle(log, start_blk,
677 num_scan_bblks - (int)head_blk,
678 (stop_on_cycle - 1), &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700679 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680 if (new_blk != -1) {
681 head_blk = new_blk;
Alex Elder9db127e2010-04-15 18:17:26 +0000682 goto validate_head;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683 }
684
685 /*
686 * Scan beginning of log now. The last part of the physical
687 * log is good. This scan needs to verify that it doesn't find
688 * the last_half_cycle.
689 */
690 start_blk = 0;
691 ASSERT(head_blk <= INT_MAX);
692 if ((error = xlog_find_verify_cycle(log,
693 start_blk, (int)head_blk,
694 stop_on_cycle, &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700695 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 if (new_blk != -1)
697 head_blk = new_blk;
698 }
699
Alex Elder9db127e2010-04-15 18:17:26 +0000700validate_head:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701 /*
702 * Now we need to make sure head_blk is not pointing to a block in
703 * the middle of a log record.
704 */
705 num_scan_bblks = XLOG_REC_SHIFT(log);
706 if (head_blk >= num_scan_bblks) {
707 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
708
709 /* start ptr at last block ptr before head_blk */
Dave Chinner24513372014-06-25 14:58:08 +1000710 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
711 if (error == 1)
712 error = -EIO;
713 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700714 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715 } else {
716 start_blk = 0;
717 ASSERT(head_blk <= INT_MAX);
Dave Chinner24513372014-06-25 14:58:08 +1000718 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
719 if (error < 0)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700720 goto out_free_buffer;
Dave Chinner24513372014-06-25 14:58:08 +1000721 if (error == 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722 /* We hit the beginning of the log during our search */
Alex Elder3f943d82010-04-15 18:17:34 +0000723 start_blk = log_bbnum - (num_scan_bblks - head_blk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 new_blk = log_bbnum;
725 ASSERT(start_blk <= INT_MAX &&
726 (xfs_daddr_t) log_bbnum-start_blk >= 0);
727 ASSERT(head_blk <= INT_MAX);
Dave Chinner24513372014-06-25 14:58:08 +1000728 error = xlog_find_verify_log_record(log, start_blk,
729 &new_blk, (int)head_blk);
730 if (error == 1)
731 error = -EIO;
732 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700733 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734 if (new_blk != log_bbnum)
735 head_blk = new_blk;
736 } else if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700737 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738 }
739
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700740 kmem_free(buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 if (head_blk == log_bbnum)
742 *return_head_blk = 0;
743 else
744 *return_head_blk = head_blk;
745 /*
746 * When returning here, we have a good block number. Bad block
747 * means that during a previous crash, we didn't have a clean break
748 * from cycle number N to cycle number N-1. In this case, we need
749 * to find the first block with cycle number N-1.
750 */
751 return 0;
752
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700753out_free_buffer:
754 kmem_free(buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 if (error)
Dave Chinnera0fa2b62011-03-07 10:01:35 +1100756 xfs_warn(log->l_mp, "failed to find log head");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 return error;
758}
759
760/*
Brian Fostereed6b462016-01-04 15:55:10 +1100761 * Seek backwards in the log for log record headers.
762 *
763 * Given a starting log block, walk backwards until we find the provided number
764 * of records or hit the provided tail block. The return value is the number of
765 * records encountered or a negative error code. The log block and buffer
766 * pointer of the last record seen are returned in rblk and rhead respectively.
767 */
768STATIC int
769xlog_rseek_logrec_hdr(
770 struct xlog *log,
771 xfs_daddr_t head_blk,
772 xfs_daddr_t tail_blk,
773 int count,
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700774 char *buffer,
Brian Fostereed6b462016-01-04 15:55:10 +1100775 xfs_daddr_t *rblk,
776 struct xlog_rec_header **rhead,
777 bool *wrapped)
778{
779 int i;
780 int error;
781 int found = 0;
782 char *offset = NULL;
783 xfs_daddr_t end_blk;
784
785 *wrapped = false;
786
787 /*
788 * Walk backwards from the head block until we hit the tail or the first
789 * block in the log.
790 */
791 end_blk = head_blk > tail_blk ? tail_blk : 0;
792 for (i = (int) head_blk - 1; i >= end_blk; i--) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700793 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Fostereed6b462016-01-04 15:55:10 +1100794 if (error)
795 goto out_error;
796
797 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
798 *rblk = i;
799 *rhead = (struct xlog_rec_header *) offset;
800 if (++found == count)
801 break;
802 }
803 }
804
805 /*
806 * If we haven't hit the tail block or the log record header count,
807 * start looking again from the end of the physical log. Note that
808 * callers can pass head == tail if the tail is not yet known.
809 */
810 if (tail_blk >= head_blk && found != count) {
811 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700812 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Fostereed6b462016-01-04 15:55:10 +1100813 if (error)
814 goto out_error;
815
816 if (*(__be32 *)offset ==
817 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
818 *wrapped = true;
819 *rblk = i;
820 *rhead = (struct xlog_rec_header *) offset;
821 if (++found == count)
822 break;
823 }
824 }
825 }
826
827 return found;
828
829out_error:
830 return error;
831}
832
833/*
Brian Foster7088c412016-01-05 07:40:16 +1100834 * Seek forward in the log for log record headers.
835 *
836 * Given head and tail blocks, walk forward from the tail block until we find
837 * the provided number of records or hit the head block. The return value is the
838 * number of records encountered or a negative error code. The log block and
839 * buffer pointer of the last record seen are returned in rblk and rhead
840 * respectively.
841 */
842STATIC int
843xlog_seek_logrec_hdr(
844 struct xlog *log,
845 xfs_daddr_t head_blk,
846 xfs_daddr_t tail_blk,
847 int count,
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700848 char *buffer,
Brian Foster7088c412016-01-05 07:40:16 +1100849 xfs_daddr_t *rblk,
850 struct xlog_rec_header **rhead,
851 bool *wrapped)
852{
853 int i;
854 int error;
855 int found = 0;
856 char *offset = NULL;
857 xfs_daddr_t end_blk;
858
859 *wrapped = false;
860
861 /*
862 * Walk forward from the tail block until we hit the head or the last
863 * block in the log.
864 */
865 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
866 for (i = (int) tail_blk; i <= end_blk; i++) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700867 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Foster7088c412016-01-05 07:40:16 +1100868 if (error)
869 goto out_error;
870
871 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
872 *rblk = i;
873 *rhead = (struct xlog_rec_header *) offset;
874 if (++found == count)
875 break;
876 }
877 }
878
879 /*
880 * If we haven't hit the head block or the log record header count,
881 * start looking again from the start of the physical log.
882 */
883 if (tail_blk > head_blk && found != count) {
884 for (i = 0; i < (int) head_blk; i++) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700885 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Foster7088c412016-01-05 07:40:16 +1100886 if (error)
887 goto out_error;
888
889 if (*(__be32 *)offset ==
890 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
891 *wrapped = true;
892 *rblk = i;
893 *rhead = (struct xlog_rec_header *) offset;
894 if (++found == count)
895 break;
896 }
897 }
898 }
899
900 return found;
901
902out_error:
903 return error;
904}
905
906/*
Brian Foster4a4f66e2017-08-08 18:21:52 -0700907 * Calculate distance from head to tail (i.e., unused space in the log).
908 */
909static inline int
910xlog_tail_distance(
911 struct xlog *log,
912 xfs_daddr_t head_blk,
913 xfs_daddr_t tail_blk)
914{
915 if (head_blk < tail_blk)
916 return tail_blk - head_blk;
917
918 return tail_blk + (log->l_logBBsize - head_blk);
919}
920
/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 *
 * head_blk is the (already verified) log head; *tail_blk is updated in place
 * if a newer valid tail is discovered. hsize is the log record header size
 * taken from the head record (h_size) and bounds the overwrite window.
 * Returns 0 on success or a negative error code.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	/* single-block scratch buffer for record header reads */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 *
	 * NOTE(review): if no header is found (return 0), tmp_tail is left
	 * unwritten by the search — presumably a record header always exists
	 * between tail and head at this point; verify against
	 * xlog_seek_logrec_hdr() callers.
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		/* retry the CRC pass with the tail advanced past the bad record */
		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	/* warn (but don't fail) if the on-disk tail had been overwritten */
	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kmem_free(buffer);
	return error;
}
1011
/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 *
 * Returns 0 on success (head/tail and rhead state updated in place) or a
 * negative error code.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kmem_free(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 *
	 * NOTE(review): first_bad is only read below when the pass returned a
	 * CRC/corruption error — assumes xlog_do_recovery_pass() always sets
	 * *first_bad in those cases; confirm against its implementation.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	/* head is good; now verify (and possibly walk forward) the tail */
	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}
1113
1114/*
Dave Chinner0703a8e2018-06-08 09:54:22 -07001115 * We need to make sure we handle log wrapping properly, so we can't use the
1116 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1117 * log.
1118 *
1119 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1120 * operation here and cast it back to a 64 bit daddr on return.
1121 */
1122static inline xfs_daddr_t
1123xlog_wrap_logbno(
1124 struct xlog *log,
1125 xfs_daddr_t bno)
1126{
1127 int mod;
1128
1129 div_s64_rem(bno, log->l_logBBsize, &mod);
1130 return mod;
1131}
1132
/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 *
 * rhead/rhead_blk describe the last record header found at the head of the
 * log. *clean is set to true only when a valid unmount record is found
 * immediately behind the head; *tail_blk is then moved past it. Returns 0
 * on success (including "not clean") or a negative error from the log read.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	/* hblks = number of header blocks for this record's format */
	hblks = xlog_logrec_hblks(log, rhead);
	/* block just past the candidate unmount record, wrapped into the log */
	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	/* an unmount record is a single-logop record ending exactly at the head */
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
1196
Brian Foster717bc0e2016-03-07 08:22:22 +11001197static void
1198xlog_set_state(
1199 struct xlog *log,
1200 xfs_daddr_t head_blk,
1201 struct xlog_rec_header *rhead,
1202 xfs_daddr_t rhead_blk,
1203 bool bump_cycle)
1204{
1205 /*
1206 * Reset log values according to the state of the log when we
1207 * crashed. In the case where head_blk == 0, we bump curr_cycle
1208 * one because the next write starts a new cycle rather than
1209 * continuing the cycle of the last good log record. At this
1210 * point we have guaranteed that all partial log records have been
1211 * accounted for. Therefore, we know that the last good log record
1212 * written was complete and ended exactly on the end boundary
1213 * of the physical log.
1214 */
1215 log->l_prev_block = rhead_blk;
1216 log->l_curr_block = (int)head_blk;
1217 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1218 if (bump_cycle)
1219 log->l_curr_cycle++;
1220 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1221 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1222 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1223 BBTOB(log->l_curr_block));
1224 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1225 BBTOB(log->l_curr_block));
1226}
1227
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk. Every log record header has
 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
 * to get a sync block number. The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn. The entire log record does not need to be valid. We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 *
 * On success, *head_blk and *tail_blk hold the verified head and tail block
 * numbers and the in-core log state has been updated accordingly. Returns 0
 * or a negative error code.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	/* single-block scratch buffer for record header reads */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {			/* special case: head at block 0 */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		/* cycle 0 at block 0 means a completely zeroed log */
		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		/* zero headers found — log is corrupt */
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kmem_free(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
1380
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1 => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;	/* single-block I/O buffer */
	char		*offset;	/* data pointer into "buffer" */
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/*
	 * Check for a totally zeroed log: cycle number 0 in the first
	 * block means nothing was ever written.
	 */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		kmem_free(buffer);
		return 1;
	}

	/*
	 * Check for a partially zeroed log: a nonzero cycle in the very
	 * last block means the whole log has been written to.
	 */
	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		kmem_free(buffer);
		return 0;
	}

	/*
	 * We have a partially zeroed log.  Binary search for the first
	 * block whose cycle number is 0.
	 */
	last_blk = log_bbnum-1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;	/* no complete record found before the head */
	if (error)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kmem_free(buffer);
	if (error)
		return error;
	return 1;
}
1485
1486/*
1487 * These are simple subroutines used by xlog_clear_stale_blocks() below
1488 * to initialize a buffer full of empty log record headers and write
1489 * them into the log.
1490 */
1491STATIC void
1492xlog_add_record(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05001493 struct xlog *log,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10001494 char *buf,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 int cycle,
1496 int block,
1497 int tail_cycle,
1498 int tail_block)
1499{
1500 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1501
1502 memset(buf, 0, BBSIZE);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10001503 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1504 recp->h_cycle = cpu_to_be32(cycle);
1505 recp->h_version = cpu_to_be32(
Eric Sandeen62118702008-03-06 13:44:28 +11001506 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10001507 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1508 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1509 recp->h_fmt = cpu_to_be32(XLOG_FMT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1511}
1512
/*
 * Overwrite the range [start_block, start_block + blocks) of the log with
 * empty log record headers stamped with the given head "cycle" and pointing
 * at (tail_cycle, tail_block).  Partial sectors at either end of the range
 * are read in first so the unwritten portion of those sectors is preserved.
 *
 * Returns 0 on success or a negative errno on allocation or I/O failure.
 */
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,		/* cycle number to stamp into records */
	int		start_block,	/* first basic block to overwrite */
	int		blocks,		/* number of basic blocks to write */
	int		tail_cycle,	/* tail LSN cycle for the records */
	int		tail_block)	/* tail LSN block for the records */
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;	/* sector-aligned start/end blocks */
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;	/* j: index of first block to stamp */

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;

		}

		/* stamp an empty record header into each block, then write */
		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kmem_free(buffer);
	return error;
}
1595
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 *
 * Returns 0 on success, -EFSCORRUPTED if the head/tail positions are
 * inconsistent, or a negative errno from the record writes.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)	/* tail of the log to preserve */
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
1725
/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.  At most one matching item is released; if no item
 * matches, this is a no-op.
 */
void
xlog_recover_release_intent(
	struct xlog		*log,
	unsigned short		intent_type,	/* XFS_LI_* log item type */
	uint64_t		intent_id)	/* id matched via iop_match */
{
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp = log->l_ailp;

	spin_lock(&ailp->ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		/*
		 * Drop the AIL lock across ->iop_release, then reacquire it
		 * so the cursor can be torn down under the lock below.
		 */
		spin_unlock(&ailp->ail_lock);
		lip->li_ops->iop_release(lip);
		spin_lock(&ailp->ail_lock);
		break;
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758/******************************************************************************
1759 *
1760 * Log recover routines
1761 *
1762 ******************************************************************************
1763 */
/*
 * Table of per-item-type recovery operations.  xlog_find_item_ops() scans
 * this table linearly to match a recovered log item's ITEM_TYPE, so every
 * recoverable log item type must have an entry here.
 */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	&xlog_buf_item_ops,
	&xlog_inode_item_ops,
	&xlog_dquot_item_ops,
	&xlog_quotaoff_item_ops,
	&xlog_icreate_item_ops,
	&xlog_efi_item_ops,
	&xlog_efd_item_ops,
	&xlog_rui_item_ops,
	&xlog_rud_item_ops,
	&xlog_cui_item_ops,
	&xlog_cud_item_ops,
	&xlog_bui_item_ops,
	&xlog_bud_item_ops,
};
1779
1780static const struct xlog_recover_item_ops *
1781xlog_find_item_ops(
1782 struct xlog_recover_item *item)
1783{
1784 unsigned int i;
1785
1786 for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1787 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1788 return xlog_recover_item_ops[i];
1789
1790 return NULL;
1791}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
Dave Chinnerf0a76952010-01-11 11:49:57 +00001793/*
Dave Chinnera775ad72013-06-05 12:09:07 +10001794 * Sort the log items in the transaction.
1795 *
1796 * The ordering constraints are defined by the inode allocation and unlink
1797 * behaviour. The rules are:
1798 *
1799 * 1. Every item is only logged once in a given transaction. Hence it
1800 * represents the last logged state of the item. Hence ordering is
1801 * dependent on the order in which operations need to be performed so
1802 * required initial conditions are always met.
1803 *
1804 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1805 * there's nothing to replay from them so we can simply cull them
1806 * from the transaction. However, we can't do that until after we've
1807 * replayed all the other items because they may be dependent on the
1808 * cancelled buffer and replaying the cancelled buffer can remove it
1809 * form the cancelled buffer table. Hence they have tobe done last.
1810 *
1811 * 3. Inode allocation buffers must be replayed before inode items that
Dave Chinner28c8e412013-06-27 16:04:55 +10001812 * read the buffer and replay changes into it. For filesystems using the
1813 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1814 * treated the same as inode allocation buffers as they create and
1815 * initialise the buffers directly.
Dave Chinnera775ad72013-06-05 12:09:07 +10001816 *
1817 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1818 * This ensures that inodes are completely flushed to the inode buffer
1819 * in a "free" state before we remove the unlinked inode list pointer.
1820 *
1821 * Hence the ordering needs to be inode allocation buffers first, inode items
1822 * second, inode unlink buffers third and cancelled buffers last.
1823 *
1824 * But there's a problem with that - we can't tell an inode allocation buffer
1825 * apart from a regular buffer, so we can't separate them. We can, however,
1826 * tell an inode unlink buffer from the others, and so we can separate them out
1827 * from all the other buffers and move them to last.
1828 *
1829 * Hence, 4 lists, in order from head to tail:
Dave Chinner28c8e412013-06-27 16:04:55 +10001830 * - buffer_list for all buffers except cancelled/inode unlink buffers
1831 * - item_list for all non-buffer items
1832 * - inode_buffer_list for inode unlink buffers
1833 * - cancel_list for the cancelled buffers
1834 *
1835 * Note that we add objects to the tail of the lists so that first-to-last
1836 * ordering is preserved within the lists. Adding objects to the head of the
1837 * list means when we traverse from the head we walk them in last-to-first
1838 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1839 * but for all other items there may be specific ordering that we need to
1840 * preserve.
Dave Chinnerf0a76952010-01-11 11:49:57 +00001841 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	struct xlog_recover_item *item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);		/* temporary home for all items */
	LIST_HEAD(cancel_list);		/* cancelled buffers: replayed last */
	LIST_HEAD(buffer_list);		/* ordinary buffers: replayed first */
	LIST_HEAD(inode_buffer_list);	/* inode unlink buffers: third */
	LIST_HEAD(item_list);		/* all non-buffer items: second */

	/* Move everything onto sort_list, then distribute by fate. */
	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;

		/* Resolve and cache the recovery ops for this item type. */
		item->ri_ops = xlog_find_item_ops(item);
		if (!item->ri_ops) {
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation (%d)",
				__func__, ITEM_TYPE(item));
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EFSCORRUPTED;
			break;
		}

		/* Item types without a reorder hook default to item_list. */
		if (item->ri_ops->reorder)
			fate = item->ri_ops->reorder(item);

		switch (fate) {
		case XLOG_REORDER_BUFFER_LIST:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XLOG_REORDER_CANCEL_LIST:
			trace_xfs_log_recover_item_reorder_head(log,
					trans, item, pass);
			list_move(&item->ri_list, &cancel_list);
			break;
		case XLOG_REORDER_INODE_BUFFER_LIST:
			list_move(&item->ri_list, &inode_buffer_list);
			break;
		case XLOG_REORDER_ITEM_LIST:
			trace_xfs_log_recover_item_reorder_tail(log,
					trans, item, pass);
			list_move_tail(&item->ri_list, &item_list);
			break;
		}
	}

	/*
	 * Rebuild r_itemq in replay order: buffers, then other items, then
	 * inode unlink buffers, then cancelled buffers (see comment above).
	 */
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&item_list))
		list_splice_tail(&item_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
1910
Darrick J. Wong8ea56822020-05-01 16:00:46 -07001911void
Christoph Hellwig7d4894b2020-04-27 18:23:17 -07001912xlog_buf_readahead(
1913 struct xlog *log,
1914 xfs_daddr_t blkno,
1915 uint len,
1916 const struct xfs_buf_ops *ops)
1917{
1918 if (!xlog_is_buffer_cancelled(log, blkno, len))
1919 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1920}
1921
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922STATIC int
Zhi Yong Wu00574da2013-08-14 15:16:03 +08001923xlog_recover_items_pass2(
1924 struct xlog *log,
1925 struct xlog_recover *trans,
1926 struct list_head *buffer_list,
1927 struct list_head *item_list)
1928{
1929 struct xlog_recover_item *item;
1930 int error = 0;
1931
1932 list_for_each_entry(item, item_list, ri_list) {
Darrick J. Wong2565a112020-05-01 16:00:50 -07001933 trace_xfs_log_recover_item_recover(log, trans, item,
1934 XLOG_RECOVER_PASS2);
1935
1936 if (item->ri_ops->commit_pass2)
1937 error = item->ri_ops->commit_pass2(log, buffer_list,
1938 item, trans->r_lsn);
Zhi Yong Wu00574da2013-08-14 15:16:03 +08001939 if (error)
1940 return error;
1941 }
1942
1943 return error;
1944}
1945
/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 *
 * In pass 2, items are accumulated on ra_list (with readahead issued via
 * ->ra_pass2) and committed in batches of XLOG_RECOVER_COMMIT_QUEUE_MAX so
 * the readahead has a chance to complete before the item is replayed.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,		/* XLOG_RECOVER_PASS1 or 2 */
	struct list_head	*buffer_list)
{
	int			error = 0;
	int			items_queued = 0;
	struct xlog_recover_item *item;
	struct xlog_recover_item *next;
	LIST_HEAD		(ra_list);	/* batch awaiting pass-2 commit */
	LIST_HEAD		(done_list);	/* items already committed */

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del_init(&trans->r_list);

	/* Put the items into the required replay order first. */
	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item, pass);

		switch (pass) {
		case XLOG_RECOVER_PASS1:
			if (item->ri_ops->commit_pass1)
				error = item->ri_ops->commit_pass1(log, item);
			break;
		case XLOG_RECOVER_PASS2:
			if (item->ri_ops->ra_pass2)
				item->ri_ops->ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				/* flush the batch once it is full */
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	/* Commit any partial batch left over, even on the error path. */
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	/* Hand all committed items back to the transaction's item list. */
	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}
2016
Dave Chinner76560662014-09-29 09:45:42 +10002017STATIC void
2018xlog_recover_add_item(
2019 struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020{
Darrick J. Wong35f45212020-04-30 10:45:41 -07002021 struct xlog_recover_item *item;
Dave Chinner76560662014-09-29 09:45:42 +10002022
Darrick J. Wong35f45212020-04-30 10:45:41 -07002023 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
Dave Chinner76560662014-09-29 09:45:42 +10002024 INIT_LIST_HEAD(&item->ri_list);
2025 list_add_tail(&item->ri_list, head);
2026}
2027
/*
 * Append "len" bytes at "dp" to the last region of the transaction's tail
 * item; this is the continuation of a region that was split across log
 * records.  If the transaction has no items yet, the split data is the
 * remainder of the transaction header itself.
 *
 * Returns 0 on success or -EFSCORRUPTED if the continuation is larger than
 * the transaction header it is supposed to complete.
 */
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,	/* continuation data from the log */
	int			len)	/* length of that data in bytes */
{
	struct xlog_recover_item *item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EFSCORRUPTED;
		}

		xlog_recover_add_item(&trans->r_itemq);
		/* copy into the tail end of r_theader: the head is done */
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	/* grow the last region buffer and append the new bytes */
	ptr = krealloc(old_ptr, len + old_len, GFP_KERNEL | __GFP_NOFAIL);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
2071
2072/*
Dave Chinner76560662014-09-29 09:45:42 +10002073 * The next region to add is the start of a new region. It could be
2074 * a whole region or it could be the first part of a new region. Because
2075 * of this, the assumption here is that the type and size fields of all
2076 * format structures fit into the first 32 bits of the structure.
2077 *
2078 * This works because all regions must be 32 bit aligned. Therefore, we
2079 * either have both fields or we have neither field. In the case we have
2080 * neither field, the data part of the region is zero length. We only have
2081 * a log_op_header and can throw away the header since a new one will appear
2082 * later. If we have at least 4 bytes, then we can determine how many regions
2083 * will appear in the current log item.
2084 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xfs_inode_log_format	*in_f; /* any will do */
	struct xlog_recover_item *item;
	char			*ptr;

	/* Zero-length regions carry no payload; nothing to record. */
	if (!len)
		return 0;

	/*
	 * An empty item queue means this is the first region of the
	 * transaction, which must be the transaction header itself.
	 */
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/* A header longer than the on-disk structure is corruption. */
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	/* Copy the region data out of the log record into our own buffer. */
	ptr = kmem_alloc(len, 0);
	memcpy(ptr, dp, len);
	/* "any will do": only ilf_size (the region count) is read below. */
	in_f = (struct xfs_inode_log_format *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);
	/* If the tail item already holds all of its regions, start a new one. */
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					struct xlog_recover_item, ri_list);
	}

	if (item->ri_total == 0) {	/* first region to be added */
		/* The region count comes from the log; bound it before use. */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EFSCORRUPTED;
		}

		/* Size the region vector now that we know how many regions. */
		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    0);
	}

	/* Never write past the region vector sized above. */
	if (item->ri_total <= item->ri_cnt) {
		xfs_warn(log->l_mp,
			"log item region count (%d) overflowed size (%d)",
			item->ri_cnt, item->ri_total);
		ASSERT(0);
		kmem_free(ptr);
		return -EFSCORRUPTED;
	}

	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
Dave Chinnerb818cca2014-09-29 09:45:54 +10002172
Dave Chinner76560662014-09-29 09:45:42 +10002173/*
2174 * Free up any resources allocated by the transaction
2175 *
2176 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2177 */
2178STATIC void
2179xlog_recover_free_trans(
2180 struct xlog_recover *trans)
2181{
Darrick J. Wong35f45212020-04-30 10:45:41 -07002182 struct xlog_recover_item *item, *n;
Dave Chinner76560662014-09-29 09:45:42 +10002183 int i;
2184
Brian Foster39775432017-06-24 10:11:41 -07002185 hlist_del_init(&trans->r_list);
2186
Dave Chinner76560662014-09-29 09:45:42 +10002187 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2188 /* Free the regions in the item. */
2189 list_del(&item->ri_list);
2190 for (i = 0; i < item->ri_cnt; i++)
2191 kmem_free(item->ri_buf[i].i_addr);
2192 /* Free the item itself */
2193 kmem_free(item->ri_buf);
2194 kmem_free(item);
2195 }
2196 /* Free the transaction recover structure */
2197 kmem_free(trans);
2198}
2199
Dave Chinnere9131e502014-09-29 09:45:18 +10002200/*
2201 * On error or completion, trans is freed.
2202 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203STATIC int
Dave Chinnereeb11682014-09-29 09:45:03 +10002204xlog_recovery_process_trans(
2205 struct xlog *log,
2206 struct xlog_recover *trans,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10002207 char *dp,
Dave Chinnereeb11682014-09-29 09:45:03 +10002208 unsigned int len,
2209 unsigned int flags,
Brian Foster12818d22016-09-26 08:22:16 +10002210 int pass,
2211 struct list_head *buffer_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212{
Dave Chinnere9131e502014-09-29 09:45:18 +10002213 int error = 0;
2214 bool freeit = false;
Dave Chinnereeb11682014-09-29 09:45:03 +10002215
2216 /* mask off ophdr transaction container flags */
2217 flags &= ~XLOG_END_TRANS;
2218 if (flags & XLOG_WAS_CONT_TRANS)
2219 flags &= ~XLOG_CONTINUE_TRANS;
2220
Dave Chinner88b863d2014-09-29 09:45:32 +10002221 /*
2222 * Callees must not free the trans structure. We'll decide if we need to
2223 * free it or not based on the operation being done and it's result.
2224 */
Dave Chinnereeb11682014-09-29 09:45:03 +10002225 switch (flags) {
2226 /* expected flag values */
2227 case 0:
2228 case XLOG_CONTINUE_TRANS:
2229 error = xlog_recover_add_to_trans(log, trans, dp, len);
2230 break;
2231 case XLOG_WAS_CONT_TRANS:
2232 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2233 break;
2234 case XLOG_COMMIT_TRANS:
Brian Foster12818d22016-09-26 08:22:16 +10002235 error = xlog_recover_commit_trans(log, trans, pass,
2236 buffer_list);
Dave Chinner88b863d2014-09-29 09:45:32 +10002237 /* success or fail, we are now done with this transaction. */
2238 freeit = true;
Dave Chinnereeb11682014-09-29 09:45:03 +10002239 break;
2240
2241 /* unexpected flag values */
2242 case XLOG_UNMOUNT_TRANS:
Dave Chinnere9131e502014-09-29 09:45:18 +10002243 /* just skip trans */
Dave Chinnereeb11682014-09-29 09:45:03 +10002244 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
Dave Chinnere9131e502014-09-29 09:45:18 +10002245 freeit = true;
Dave Chinnereeb11682014-09-29 09:45:03 +10002246 break;
2247 case XLOG_START_TRANS:
Dave Chinnereeb11682014-09-29 09:45:03 +10002248 default:
2249 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2250 ASSERT(0);
Darrick J. Wong895e1962019-11-06 09:17:43 -08002251 error = -EFSCORRUPTED;
Dave Chinnereeb11682014-09-29 09:45:03 +10002252 break;
2253 }
Dave Chinnere9131e502014-09-29 09:45:18 +10002254 if (error || freeit)
2255 xlog_recover_free_trans(trans);
Dave Chinnereeb11682014-09-29 09:45:03 +10002256 return error;
2257}
2258
Dave Chinnerb818cca2014-09-29 09:45:54 +10002259/*
2260 * Lookup the transaction recovery structure associated with the ID in the
2261 * current ophdr. If the transaction doesn't exist and the start flag is set in
2262 * the ophdr, then allocate a new transaction for future ID matches to find.
2263 * Either way, return what we found during the lookup - an existing transaction
2264 * or nothing.
2265 */
Dave Chinnereeb11682014-09-29 09:45:03 +10002266STATIC struct xlog_recover *
2267xlog_recover_ophdr_to_trans(
2268 struct hlist_head rhash[],
2269 struct xlog_rec_header *rhead,
2270 struct xlog_op_header *ohead)
2271{
2272 struct xlog_recover *trans;
2273 xlog_tid_t tid;
2274 struct hlist_head *rhp;
2275
2276 tid = be32_to_cpu(ohead->oh_tid);
2277 rhp = &rhash[XLOG_RHASH(tid)];
Dave Chinnerb818cca2014-09-29 09:45:54 +10002278 hlist_for_each_entry(trans, rhp, r_list) {
2279 if (trans->r_log_tid == tid)
2280 return trans;
2281 }
Dave Chinnereeb11682014-09-29 09:45:03 +10002282
2283 /*
Dave Chinnerb818cca2014-09-29 09:45:54 +10002284 * skip over non-start transaction headers - we could be
2285 * processing slack space before the next transaction starts
Dave Chinnereeb11682014-09-29 09:45:03 +10002286 */
Dave Chinnerb818cca2014-09-29 09:45:54 +10002287 if (!(ohead->oh_flags & XLOG_START_TRANS))
2288 return NULL;
2289
2290 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2291
2292 /*
2293 * This is a new transaction so allocate a new recovery container to
2294 * hold the recovery ops that will follow.
2295 */
Tetsuo Handa707e0dd2019-08-26 12:06:22 -07002296 trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
Dave Chinnerb818cca2014-09-29 09:45:54 +10002297 trans->r_log_tid = tid;
2298 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2299 INIT_LIST_HEAD(&trans->r_itemq);
2300 INIT_HLIST_NODE(&trans->r_list);
2301 hlist_add_head(&trans->r_list, rhp);
2302
2303 /*
2304 * Nothing more to do for this ophdr. Items to be added to this new
2305 * transaction will be in subsequent ophdr containers.
2306 */
Dave Chinnereeb11682014-09-29 09:45:03 +10002307 return NULL;
2308}
2309
/*
 * Validate one op header within a log record and feed its payload to the
 * owning transaction. Returns 0 (including the no-op case where the ophdr
 * belongs to no tracked transaction) or a negative errno on corruption or
 * processing failure.
 */
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/* Find (or, for a START ophdr, create) the owning transaction. */
	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	/*
	 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required because:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
	 * - Log recovery skips items with a metadata LSN >= the current LSN of
	 *   the recovery item.
	 * - Separate recovery items against the same metadata buffer can share
	 *   a current LSN. I.e., consider that the LSN of a recovery item is
	 *   defined as the starting LSN of the first record in which its
	 *   transaction appears, that a record can hold multiple transactions,
	 *   and/or that a transaction can span multiple records.
	 *
	 * In other words, we are allowed to submit a buffer from log recovery
	 * once per current LSN. Otherwise, we may incorrectly skip recovery
	 * items and cause corruption.
	 *
	 * We don't know up front whether buffers are updated multiple times per
	 * LSN. Therefore, track the current LSN of each commit log record as it
	 * is processed and drain the queue when it changes. Use commit records
	 * because they are ordered correctly by the logging code.
	 */
	if (log->l_recovery_lsn != trans->r_lsn &&
	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass, buffer_list);
}
2384
2385/*
2386 * There are two valid states of the r_state field. 0 indicates that the
2387 * transaction structure is in a normal state. We have either seen the
2388 * start of the transaction or the last operation we added was not a partial
2389 * operation. If the last operation we added to the transaction was a
2390 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2391 *
2392 * NOTE: skip LRs with 0 data length.
2393 */
2394STATIC int
2395xlog_recover_process_data(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002396 struct xlog *log,
Dave Chinnerf0a76952010-01-11 11:49:57 +00002397 struct hlist_head rhash[],
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002398 struct xlog_rec_header *rhead,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10002399 char *dp,
Brian Foster12818d22016-09-26 08:22:16 +10002400 int pass,
2401 struct list_head *buffer_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402{
Dave Chinnereeb11682014-09-29 09:45:03 +10002403 struct xlog_op_header *ohead;
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10002404 char *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 int num_logops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
Dave Chinnereeb11682014-09-29 09:45:03 +10002408 end = dp + be32_to_cpu(rhead->h_len);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002409 num_logops = be32_to_cpu(rhead->h_num_logops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
2411 /* check the log format matches our own - else we can't recover */
2412 if (xlog_header_check_recover(log->l_mp, rhead))
Dave Chinner24513372014-06-25 14:58:08 +10002413 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
Brian Foster5cd9cee2016-09-26 08:34:52 +10002415 trace_xfs_log_recover_record(log, rhead, pass);
Dave Chinnereeb11682014-09-29 09:45:03 +10002416 while ((dp < end) && num_logops) {
2417
2418 ohead = (struct xlog_op_header *)dp;
2419 dp += sizeof(*ohead);
2420 ASSERT(dp <= end);
2421
2422 /* errors will abort recovery */
2423 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
Brian Foster12818d22016-09-26 08:22:16 +10002424 dp, end, pass, buffer_list);
Dave Chinnereeb11682014-09-29 09:45:03 +10002425 if (error)
2426 return error;
2427
Christoph Hellwig67fcb7b2007-10-12 10:58:59 +10002428 dp += be32_to_cpu(ohead->oh_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 num_logops--;
2430 }
2431 return 0;
2432}
2433
Darrick J. Wong50995582017-11-21 20:53:02 -08002434/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
	struct xfs_mount	*mp,
	struct list_head	*capture_list)
{
	struct xfs_defer_capture *dfc, *next;
	struct xfs_trans	*tp;
	struct xfs_inode	*ip;
	int			error = 0;

	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
		struct xfs_trans_res	resv;

		/*
		 * Create a new transaction reservation from the captured
		 * information. Set logcount to 1 to force the new transaction
		 * to regrant every roll so that we can make forward progress
		 * in recovery no matter how full the log might be.
		 */
		resv.tr_logres = dfc->dfc_logres;
		resv.tr_logcount = 1;
		resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;

		/*
		 * On failure, captures still on the list are released by the
		 * caller via xlog_abort_defer_ops().
		 */
		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		/*
		 * Transfer to this new transaction all the dfops we captured
		 * from recovering a single intent item.
		 */
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_continue(dfc, tp, &ip);

		error = xfs_trans_commit(tp);
		/*
		 * NOTE(review): ip looks to be an inode handed back (locked)
		 * by xfs_defer_ops_continue, or NULL if the capture held none
		 * — unlock and release it once the commit is done.
		 */
		if (ip) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_irele(ip);
		}
		if (error)
			return error;
	}

	/* Every capture was transferred and committed; list must be empty. */
	ASSERT(list_empty(capture_list));
	return 0;
}
2482
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002483/* Release all the captured defer ops and capture structures in this list. */
2484static void
2485xlog_abort_defer_ops(
2486 struct xfs_mount *mp,
2487 struct list_head *capture_list)
2488{
2489 struct xfs_defer_capture *dfc;
2490 struct xfs_defer_capture *next;
2491
2492 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2493 list_del_init(&dfc->dfc_list);
2494 xfs_defer_ops_release(mp, dfc);
2495 }
2496}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497/*
Darrick J. Wongdc423752016-08-03 11:23:49 +10002498 * When this is called, all of the log intent items which did not have
2499 * corresponding log done items should be in the AIL. What we do now
2500 * is update the data structures associated with each one.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 *
Darrick J. Wongdc423752016-08-03 11:23:49 +10002502 * Since we process the log intent items in normal transactions, they
2503 * will be removed at some point after the commit. This prevents us
2504 * from just walking down the list processing each one. We'll use a
2505 * flag in the intent item to skip those that we've already processed
2506 * and use the AIL iteration mechanism's generation count to try to
2507 * speed this up at least a bit.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 *
Darrick J. Wongdc423752016-08-03 11:23:49 +10002509 * When we start, we know that the intents are the only things in the
2510 * AIL. As we process them, however, other items are added to the
2511 * AIL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 */
David Chinner3c1e2bb2008-04-10 12:21:11 +10002513STATIC int
Darrick J. Wongdc423752016-08-03 11:23:49 +10002514xlog_recover_process_intents(
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002515 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516{
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002517 LIST_HEAD(capture_list);
David Chinner27d8d5f2008-10-30 17:38:39 +11002518 struct xfs_ail_cursor cur;
Darrick J. Wong50995582017-11-21 20:53:02 -08002519 struct xfs_log_item *lip;
David Chinnera9c21c12008-10-30 17:39:35 +11002520 struct xfs_ail *ailp;
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002521 int error = 0;
Darrick J. Wong7bf7a192017-08-31 15:11:06 -07002522#if defined(DEBUG) || defined(XFS_WARN)
Darrick J. Wongdc423752016-08-03 11:23:49 +10002523 xfs_lsn_t last_lsn;
Darrick J. Wong7bf7a192017-08-31 15:11:06 -07002524#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
David Chinnera9c21c12008-10-30 17:39:35 +11002526 ailp = log->l_ailp;
Matthew Wilcox57e80952018-03-07 14:59:39 -08002527 spin_lock(&ailp->ail_lock);
Darrick J. Wong7bf7a192017-08-31 15:11:06 -07002528#if defined(DEBUG) || defined(XFS_WARN)
Darrick J. Wongdc423752016-08-03 11:23:49 +10002529 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
Darrick J. Wong7bf7a192017-08-31 15:11:06 -07002530#endif
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002531 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2532 lip != NULL;
2533 lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 /*
Darrick J. Wongdc423752016-08-03 11:23:49 +10002535 * We're done when we see something other than an intent.
2536 * There should be no intents left in the AIL now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 */
Darrick J. Wongdc423752016-08-03 11:23:49 +10002538 if (!xlog_item_is_intent(lip)) {
David Chinner27d8d5f2008-10-30 17:38:39 +11002539#ifdef DEBUG
David Chinnera9c21c12008-10-30 17:39:35 +11002540 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
Darrick J. Wongdc423752016-08-03 11:23:49 +10002541 ASSERT(!xlog_item_is_intent(lip));
David Chinner27d8d5f2008-10-30 17:38:39 +11002542#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 break;
2544 }
2545
2546 /*
Darrick J. Wongdc423752016-08-03 11:23:49 +10002547 * We should never see a redo item with a LSN higher than
2548 * the last transaction we found in the log at the start
2549 * of recovery.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 */
Darrick J. Wongdc423752016-08-03 11:23:49 +10002551 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552
Darrick J. Wong50995582017-11-21 20:53:02 -08002553 /*
2554 * NOTE: If your intent processing routine can create more
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002555 * deferred ops, you /must/ attach them to the capture list in
2556 * the recover routine or else those subsequent intents will be
Darrick J. Wong50995582017-11-21 20:53:02 -08002557 * replayed in the wrong order!
2558 */
Darrick J. Wong901219b2020-09-28 11:01:45 -07002559 spin_unlock(&ailp->ail_lock);
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002560 error = lip->li_ops->iop_recover(lip, &capture_list);
Darrick J. Wong901219b2020-09-28 11:01:45 -07002561 spin_lock(&ailp->ail_lock);
David Chinner27d8d5f2008-10-30 17:38:39 +11002562 if (error)
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002563 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 }
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002565
Eric Sandeene4a1e292014-04-14 19:06:05 +10002566 xfs_trans_ail_cursor_done(&cur);
Matthew Wilcox57e80952018-03-07 14:59:39 -08002567 spin_unlock(&ailp->ail_lock);
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002568 if (error)
2569 goto err;
Darrick J. Wong50995582017-11-21 20:53:02 -08002570
Darrick J. Wonge6fff812020-09-25 17:39:37 -07002571 error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2572 if (error)
2573 goto err;
2574
2575 return 0;
2576err:
2577 xlog_abort_defer_ops(log->l_mp, &capture_list);
David Chinner3c1e2bb2008-04-10 12:21:11 +10002578 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579}
2580
2581/*
Darrick J. Wongdc423752016-08-03 11:23:49 +10002582 * A cancel occurs when the mount has failed and we're bailing out.
2583 * Release all pending log intent items so they don't pin the AIL.
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002584 */
Hariprasad Kelama7a92502019-07-03 07:34:18 -07002585STATIC void
Darrick J. Wongdc423752016-08-03 11:23:49 +10002586xlog_recover_cancel_intents(
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002587 struct xlog *log)
2588{
2589 struct xfs_log_item *lip;
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002590 struct xfs_ail_cursor cur;
2591 struct xfs_ail *ailp;
2592
2593 ailp = log->l_ailp;
Matthew Wilcox57e80952018-03-07 14:59:39 -08002594 spin_lock(&ailp->ail_lock);
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002595 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2596 while (lip != NULL) {
2597 /*
Darrick J. Wongdc423752016-08-03 11:23:49 +10002598 * We're done when we see something other than an intent.
2599 * There should be no intents left in the AIL now.
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002600 */
Darrick J. Wongdc423752016-08-03 11:23:49 +10002601 if (!xlog_item_is_intent(lip)) {
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002602#ifdef DEBUG
2603 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
Darrick J. Wongdc423752016-08-03 11:23:49 +10002604 ASSERT(!xlog_item_is_intent(lip));
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002605#endif
2606 break;
2607 }
2608
Darrick J. Wong9329ba82020-05-01 16:00:52 -07002609 spin_unlock(&ailp->ail_lock);
2610 lip->li_ops->iop_release(lip);
2611 spin_lock(&ailp->ail_lock);
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002612 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2613 }
2614
2615 xfs_trans_ail_cursor_done(&cur);
Matthew Wilcox57e80952018-03-07 14:59:39 -08002616 spin_unlock(&ailp->ail_lock);
Brian Fosterf0b2efa2015-08-19 09:58:36 +10002617}
2618
2619/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 * This routine performs a transaction to null out a bad inode pointer
2621 * in an agi unlinked inode hash bucket.
2622 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	/* Terminate the bucket's unlinked chain... */
	agi = agibp->b_addr;
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	/* ...and log only the modified bucket slot, not the whole AGI. */
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	/* Best effort only: warn and let recovery continue regardless. */
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}
2661
Christoph Hellwig23fac502008-11-28 14:23:40 +11002662STATIC xfs_agino_t
2663xlog_recover_process_one_iunlink(
2664 struct xfs_mount *mp,
2665 xfs_agnumber_t agno,
2666 xfs_agino_t agino,
2667 int bucket)
2668{
2669 struct xfs_buf *ibp;
2670 struct xfs_dinode *dip;
2671 struct xfs_inode *ip;
2672 xfs_ino_t ino;
2673 int error;
2674
2675 ino = XFS_AGINO_TO_INO(mp, agno, agino);
Dave Chinner7b6259e2010-06-24 11:35:17 +10002676 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
Christoph Hellwig23fac502008-11-28 14:23:40 +11002677 if (error)
2678 goto fail;
2679
2680 /*
2681 * Get the on disk inode to find the next inode in the bucket.
2682 */
Brian Fosterc1995072020-05-06 13:29:20 -07002683 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
Christoph Hellwig23fac502008-11-28 14:23:40 +11002684 if (error)
Christoph Hellwig0e446672008-11-28 14:23:42 +11002685 goto fail_iput;
Christoph Hellwig23fac502008-11-28 14:23:40 +11002686
Darrick J. Wong17c12bc2016-10-03 09:11:29 -07002687 xfs_iflags_clear(ip, XFS_IRECOVERY);
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002688 ASSERT(VFS_I(ip)->i_nlink == 0);
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002689 ASSERT(VFS_I(ip)->i_mode != 0);
Christoph Hellwig23fac502008-11-28 14:23:40 +11002690
2691 /* setup for the next pass */
2692 agino = be32_to_cpu(dip->di_next_unlinked);
2693 xfs_buf_relse(ibp);
2694
2695 /*
2696 * Prevent any DMAPI event from being sent when the reference on
2697 * the inode is dropped.
2698 */
2699 ip->i_d.di_dmevmask = 0;
2700
Darrick J. Wong44a87362018-07-25 12:52:32 -07002701 xfs_irele(ip);
Christoph Hellwig23fac502008-11-28 14:23:40 +11002702 return agino;
2703
Christoph Hellwig0e446672008-11-28 14:23:42 +11002704 fail_iput:
Darrick J. Wong44a87362018-07-25 12:52:32 -07002705 xfs_irele(ip);
Christoph Hellwig23fac502008-11-28 14:23:40 +11002706 fail:
2707 /*
2708 * We can't read in the inode this bucket points to, or this inode
2709 * is messed up. Just ditch this bucket of inodes. We will lose
2710 * some inodes and space, but at least we won't hang.
2711 *
2712 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2713 * clear the inode pointer in the bucket.
2714 */
2715 xlog_recover_clear_agi_bucket(mp, agno, bucket);
2716 return NULLAGINO;
2717}
2718
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719/*
Dave Chinner8ab39f12019-09-05 21:35:39 -07002720 * Recover AGI unlinked lists
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 *
Dave Chinner8ab39f12019-09-05 21:35:39 -07002722 * This is called during recovery to process any inodes which we unlinked but
2723 * not freed when the system crashed. These inodes will be on the lists in the
2724 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2725 * any inodes found on the lists. Each inode is removed from the lists when it
2726 * has been fully truncated and is freed. The freeing of the inode and its
2727 * removal from the list must be atomic.
2728 *
2729 * If everything we touch in the agi processing loop is already in memory, this
2730 * loop can hold the cpu for a long time. It runs without lock contention,
2731 * memory allocation contention, the need wait for IO, etc, and so will run
2732 * until we either run out of inodes to process, run low on memory or we run out
2733 * of log space.
2734 *
2735 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
 * and can prevent other filesystem work (such as CIL pushes) from running. This
2737 * can lead to deadlocks if the recovery process runs out of log reservation
2738 * space. Hence we need to yield the CPU when there is other kernel work
2739 * scheduled on this CPU to ensure other scheduled work can run without undue
2740 * latency.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 */
Eric Sandeend96f8f82009-07-02 00:09:33 -05002742STATIC void
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743xlog_recover_process_iunlinks(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002744 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745{
2746 xfs_mount_t *mp;
2747 xfs_agnumber_t agno;
2748 xfs_agi_t *agi;
2749 xfs_buf_t *agibp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 xfs_agino_t agino;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 int bucket;
2752 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
2754 mp = log->l_mp;
2755
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2757 /*
2758 * Find the agi for this ag.
2759 */
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002760 error = xfs_read_agi(mp, NULL, agno, &agibp);
2761 if (error) {
2762 /*
2763 * AGI is b0rked. Don't process it.
2764 *
2765 * We should probably mark the filesystem as corrupt
2766 * after we've recovered all the ag's we can....
2767 */
2768 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 }
Jan Karad97d32e2012-03-15 09:34:02 +00002770 /*
2771 * Unlock the buffer so that it can be acquired in the normal
2772 * course of the transaction to truncate and free each inode.
2773 * Because we are not racing with anyone else here for the AGI
2774 * buffer, we don't even need to hold it locked to read the
2775 * initial unlinked bucket entries out of the buffer. We keep
2776 * buffer reference though, so that it stays pinned in memory
2777 * while we need the buffer.
2778 */
Christoph Hellwig370c7822020-03-10 08:57:29 -07002779 agi = agibp->b_addr;
Jan Karad97d32e2012-03-15 09:34:02 +00002780 xfs_buf_unlock(agibp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781
2782 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
Christoph Hellwig16259e72005-11-02 15:11:25 +11002783 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 while (agino != NULLAGINO) {
Christoph Hellwig23fac502008-11-28 14:23:40 +11002785 agino = xlog_recover_process_one_iunlink(mp,
2786 agno, agino, bucket);
Dave Chinner8ab39f12019-09-05 21:35:39 -07002787 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 }
2789 }
Jan Karad97d32e2012-03-15 09:34:02 +00002790 xfs_buf_rele(agibp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792}
2793
Eric Sandeen91083262019-05-01 20:26:30 -07002794STATIC void
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795xlog_unpack_data(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002796 struct xlog_rec_header *rhead,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10002797 char *dp,
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002798 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
2800 int i, j, k;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002802 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002804 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 dp += BBSIZE;
2806 }
2807
Eric Sandeen62118702008-03-06 13:44:28 +11002808 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
Christoph Hellwigb28708d2008-11-28 14:23:38 +11002809 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002810 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2812 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002813 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 dp += BBSIZE;
2815 }
2816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817}
2818
Brian Foster9d949012016-01-04 15:55:10 +11002819/*
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002820 * CRC check, unpack and process a log record.
Brian Foster9d949012016-01-04 15:55:10 +11002821 */
2822STATIC int
2823xlog_recover_process(
2824 struct xlog *log,
2825 struct hlist_head rhash[],
2826 struct xlog_rec_header *rhead,
2827 char *dp,
Brian Foster12818d22016-09-26 08:22:16 +10002828 int pass,
2829 struct list_head *buffer_list)
Brian Foster9d949012016-01-04 15:55:10 +11002830{
Dave Chinnercae028d2016-12-05 14:40:32 +11002831 __le32 old_crc = rhead->h_crc;
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002832 __le32 crc;
2833
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002834 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
Brian Foster65282502016-01-04 15:55:10 +11002835
2836 /*
2837 * Nothing else to do if this is a CRC verification pass. Just return
2838 * if this a record with a non-zero crc. Unfortunately, mkfs always
Dave Chinnercae028d2016-12-05 14:40:32 +11002839 * sets old_crc to 0 so we must consider this valid even on v5 supers.
Brian Foster65282502016-01-04 15:55:10 +11002840 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2841 * know precisely what failed.
2842 */
2843 if (pass == XLOG_RECOVER_CRCPASS) {
Dave Chinnercae028d2016-12-05 14:40:32 +11002844 if (old_crc && crc != old_crc)
Brian Foster65282502016-01-04 15:55:10 +11002845 return -EFSBADCRC;
2846 return 0;
2847 }
2848
2849 /*
2850 * We're in the normal recovery path. Issue a warning if and only if the
2851 * CRC in the header is non-zero. This is an advisory warning and the
2852 * zero CRC check prevents warnings from being emitted when upgrading
2853 * the kernel from one that does not add CRCs by default.
2854 */
Dave Chinnercae028d2016-12-05 14:40:32 +11002855 if (crc != old_crc) {
2856 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002857 xfs_alert(log->l_mp,
2858 "log record CRC mismatch: found 0x%x, expected 0x%x.",
Dave Chinnercae028d2016-12-05 14:40:32 +11002859 le32_to_cpu(old_crc),
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002860 le32_to_cpu(crc));
2861 xfs_hex_dump(dp, 32);
2862 }
2863
2864 /*
2865 * If the filesystem is CRC enabled, this mismatch becomes a
2866 * fatal log corruption failure.
2867 */
Darrick J. Wonga5155b82019-11-02 09:40:53 -07002868 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2869 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002870 return -EFSCORRUPTED;
Darrick J. Wonga5155b82019-11-02 09:40:53 -07002871 }
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002872 }
Brian Foster9d949012016-01-04 15:55:10 +11002873
Eric Sandeen91083262019-05-01 20:26:30 -07002874 xlog_unpack_data(rhead, dp, log);
Brian Foster9d949012016-01-04 15:55:10 +11002875
Brian Foster12818d22016-09-26 08:22:16 +10002876 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2877 buffer_list);
Brian Foster9d949012016-01-04 15:55:10 +11002878}
2879
/*
 * Sanity check a log record header before it is trusted any further.
 *
 * @rhead:   candidate record header read from disk.
 * @blkno:   basic block the header was read from; callers pass 0 when the
 *	     header wrapped the physical end of the log.
 * @bufsize: size in bytes of the buffer the record body will be read into,
 *	     used to bounds check h_len.
 *
 * Returns 0 if the header looks valid, -EFSCORRUPTED otherwise.  Each check
 * is wrapped in XFS_IS_CORRUPT() so a failure is also reported to the log.
 */
STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno,
	int			bufsize)
{
	int			hlen;

	/* Record must carry the log record magic number. */
	if (XFS_IS_CORRUPT(log->l_mp,
			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
		return -EFSCORRUPTED;
	/* Version must be non-zero and contain only known version bits. */
	if (XFS_IS_CORRUPT(log->l_mp,
			   (!rhead->h_version ||
			   (be32_to_cpu(rhead->h_version) &
			   (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EFSCORRUPTED;
	}

	/*
	 * LR body must have data (or it wouldn't have been written)
	 * and h_len must not be greater than LR buffer size.
	 */
	hlen = be32_to_cpu(rhead->h_len);
	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
		return -EFSCORRUPTED;

	/* The header's block number must lie within the physical log. */
	if (XFS_IS_CORRUPT(log->l_mp,
			   blkno > log->l_logBBsize || blkno > INT_MAX))
		return -EFSCORRUPTED;
	return 0;
}
2914
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 *
 * Returns 0 or a negative errno.  On error, if @first_bad is non-NULL it
 * is set to the start block of the record being processed when the failure
 * was detected (tracked in rhead_blk below).
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no, rblk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	char			*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			error2 = 0;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	int			i;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD		(buffer_list);

	ASSERT(head_blk != tail_blk);
	blk_no = rhead_blk = tail_blk;

	for (i = 0; i < XLOG_RHASH_SIZE; i++)
		INIT_HLIST_HEAD(&rhash[i]);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_alloc_buffer(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;

		/*
		 * xfsprogs has a bug where record length is based on lsunit but
		 * h_size (iclog size) is hardcoded to 32k. Now that we
		 * unconditionally CRC verify the unmount record, this means the
		 * log buffer can be too small for the record and cause an
		 * overrun.
		 *
		 * Detect this condition here. Use lsunit for the buffer size as
		 * long as this looks like the mkfs case. Otherwise, return an
		 * error to avoid a buffer overrun.
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
		    rhead->h_num_logops == cpu_to_be32(1)) {
			xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
				 h_size, log->l_mp->m_logbsize);
			h_size = log->l_mp->m_logbsize;
		}

		error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
		if (error)
			goto bread_err1;

		/* Re-allocate hbp at the real header size if it is not 1 BB. */
		hblks = xlog_logrec_hblks(log, rhead);
		if (hblks != 1) {
			kmem_free(hbp);
			hbp = xlog_alloc_buffer(log, hblks);
		}
	} else {
		/* v1 logs: fixed single-sector header, fixed record size. */
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_alloc_buffer(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	/* Catches both the v1 allocation and the v2 re-allocation above. */
	if (!hbp)
		return -ENOMEM;
	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
	if (!dbp) {
		kmem_free(hbp);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): rhash was already initialised by the
	 * INIT_HLIST_HEAD() loop above; this memset is redundant but
	 * harmless for hlist heads.
	 */
	memset(rhash, 0, sizeof(rhash));
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_noalign(log, 0,
						wrapped_hblks,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
					split_hblks ? blk_no : 0, h_size);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/*
			 * Read the log record data in multiple reads if it
			 * wraps around the end of the log. Note that if the
			 * header already wrapped, blk_no could point past the
			 * end of the log. The record data is contiguous in
			 * that case.
			 */
			if (blk_no + bblks <= log->l_logBBsize ||
			    blk_no >= log->l_logBBsize) {
				rblk_no = xlog_wrap_logbno(log, blk_no);
				error = xlog_bread(log, rblk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_noalign(log, 0,
						bblks - split_bblks,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass, &buffer_list);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		/* Fold blk_no back into the start of the physical log. */
		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass,
					     &buffer_list);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	kmem_free(dbp);
 bread_err1:
	kmem_free(hbp);

	/*
	 * Submit buffers that have been added from the last record processed,
	 * regardless of error status.
	 */
	if (!list_empty(&buffer_list))
		error2 = xfs_buf_delwri_submit(&buffer_list);

	if (error && first_bad)
		*first_bad = rhead_blk;

	/*
	 * Transactions are freed at commit time but transactions without commit
	 * records on disk are never committed. Free any that may be left in the
	 * hash table.
	 */
	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
		struct hlist_node	*tmp;
		struct xlog_recover	*trans;

		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
			xlog_recover_free_trans(trans);
	}

	return error ? error : error2;
}
3201
3202/*
3203 * Do the recovery of the log. We actually do this in two phases.
3204 * The two passes are necessary in order to implement the function
3205 * of cancelling a record written into the log. The first pass
3206 * determines those things which have been cancelled, and the
3207 * second pass replays log items normally except for those which
3208 * have been cancelled. The handling of the replay and cancellations
3209 * takes place in the log item type specific routines.
3210 *
3211 * The table of items which have cancel records in the log is allocated
3212 * and freed at this level, since only here do we know when all of
3213 * the log recovery has been completed.
3214 */
3215STATIC int
3216xlog_do_log_recovery(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05003217 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218 xfs_daddr_t head_blk,
3219 xfs_daddr_t tail_blk)
3220{
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003221 int error, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222
3223 ASSERT(head_blk != tail_blk);
3224
3225 /*
3226 * First do a pass to find all of the cancelled buf log items.
3227 * Store them in the buf_cancel_table for use in the second pass.
3228 */
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003229 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3230 sizeof(struct list_head),
Tetsuo Handa707e0dd2019-08-26 12:06:22 -07003231 0);
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003232 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3233 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3234
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
Brian Fosterd7f37692016-01-04 15:55:10 +11003236 XLOG_RECOVER_PASS1, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237 if (error != 0) {
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10003238 kmem_free(log->l_buf_cancel_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 log->l_buf_cancel_table = NULL;
3240 return error;
3241 }
3242 /*
3243 * Then do a second pass to actually recover the items in the log.
3244 * When it is complete free the table of buf cancel items.
3245 */
3246 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
Brian Fosterd7f37692016-01-04 15:55:10 +11003247 XLOG_RECOVER_PASS2, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248#ifdef DEBUG
Tim Shimmin6d192a92006-06-09 14:55:38 +10003249 if (!error) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 int i;
3251
3252 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003253 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 }
3255#endif /* DEBUG */
3256
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10003257 kmem_free(log->l_buf_cancel_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258 log->l_buf_cancel_table = NULL;
3259
3260 return error;
3261}
3262
/*
 * Do the actual recovery: replay the log contents, then re-read and
 * reinstate the superblock and in-core geometry so the mount continues
 * with post-recovery state.  Returns 0 or a negative errno.
 */
STATIC int
xlog_do_recover(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_buf		*bp = mp->m_sb_bp;	/* cached superblock buffer */
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	trace_xfs_log_recover(log, head_blk, tail_blk);

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Now that we've finished replaying all buffer and inode updates,
	 * re-read the superblock and reverify it.
	 *
	 * Take an extra hold before the read so the single xfs_buf_relse()
	 * on each exit path below drops both the lock and our reference.
	 */
	xfs_buf_lock(bp);
	xfs_buf_hold(bp);
	error = _xfs_buf_read(bp, XBF_READ);
	if (error) {
		/* A read failure without a shutdown in progress is unexpected. */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __this_address);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	xfs_sb_from_disk(sbp, bp->b_addr);
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	/* Debug-only cross-check of AG counters vs the superblock. */
	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
3338
3339/*
3340 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3341 *
3342 * Return error or zero.
3343 */
3344int
3345xlog_recover(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05003346 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347{
3348 xfs_daddr_t head_blk, tail_blk;
3349 int error;
3350
3351 /* find the tail of the log */
Brian Fostera45086e2015-10-12 15:59:25 +11003352 error = xlog_find_tail(log, &head_blk, &tail_blk);
3353 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 return error;
3355
Brian Fostera45086e2015-10-12 15:59:25 +11003356 /*
3357 * The superblock was read before the log was available and thus the LSN
3358 * could not be verified. Check the superblock LSN against the current
3359 * LSN now that it's known.
3360 */
3361 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
3362 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3363 return -EINVAL;
3364
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 if (tail_blk != head_blk) {
3366 /* There used to be a comment here:
3367 *
3368 * disallow recovery on read-only mounts. note -- mount
3369 * checks for ENOSPC and turns it into an intelligent
3370 * error message.
3371 * ...but this is no longer true. Now, unless you specify
3372 * NORECOVERY (in which case this function would never be
3373 * called), we just go ahead and recover. We do this all
3374 * under the vfs layer, so we can get away with it unless
3375 * the device itself is read-only, in which case we fail.
3376 */
Utako Kusaka3a02ee12007-05-08 13:50:06 +10003377 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 return error;
3379 }
3380
Dave Chinnere721f502013-04-03 16:11:32 +11003381 /*
3382 * Version 5 superblock log feature mask validation. We know the
3383 * log is dirty so check if there are any unknown log features
3384 * in what we need to recover. If there are unknown features
3385 * (e.g. unsupported transactions, then simply reject the
3386 * attempt at recovery before touching anything.
3387 */
3388 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
3389 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3390 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3391 xfs_warn(log->l_mp,
Joe Perchesf41febd2015-07-29 11:52:04 +10003392"Superblock has unknown incompatible log features (0x%x) enabled.",
Dave Chinnere721f502013-04-03 16:11:32 +11003393 (log->l_mp->m_sb.sb_features_log_incompat &
3394 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
Joe Perchesf41febd2015-07-29 11:52:04 +10003395 xfs_warn(log->l_mp,
3396"The log can not be fully and/or safely recovered by this kernel.");
3397 xfs_warn(log->l_mp,
3398"Please recover the log on a kernel that supports the unknown features.");
Dave Chinner24513372014-06-25 14:58:08 +10003399 return -EINVAL;
Dave Chinnere721f502013-04-03 16:11:32 +11003400 }
3401
Brian Foster2e227172014-09-09 11:56:13 +10003402 /*
3403 * Delay log recovery if the debug hook is set. This is debug
3404 * instrumention to coordinate simulation of I/O failures with
3405 * log recovery.
3406 */
3407 if (xfs_globals.log_recovery_delay) {
3408 xfs_notice(log->l_mp,
3409 "Delaying log recovery for %d seconds.",
3410 xfs_globals.log_recovery_delay);
3411 msleep(xfs_globals.log_recovery_delay * 1000);
3412 }
3413
Dave Chinnera0fa2b62011-03-07 10:01:35 +11003414 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3415 log->l_mp->m_logname ? log->l_mp->m_logname
3416 : "internal");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417
3418 error = xlog_do_recover(log, head_blk, tail_blk);
3419 log->l_flags |= XLOG_RECOVERY_NEEDED;
3420 }
3421 return error;
3422}
3423
3424/*
3425 * In the first part of recovery we replay inodes and buffers and build
3426 * up the list of extent free items which need to be processed. Here
3427 * we process the extent free items and clean up the on disk unlinked
3428 * inode lists. This is separated from the first part of recovery so
3429 * that the root and real-time bitmap inodes can be read in from disk in
3430 * between the two stages. This is necessary so that we can free space
3431 * in the real-time portion of the file system.
3432 */
3433int
3434xlog_recover_finish(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05003435 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436{
3437 /*
3438 * Now we're ready to do the transactions needed for the
3439 * rest of recovery. Start with completing all the extent
3440 * free intent records and then process the unlinked inode
3441 * lists. At this point, we essentially run in normal mode
3442 * except that we're still performing recovery actions
3443 * rather than accepting new requests.
3444 */
3445 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
David Chinner3c1e2bb2008-04-10 12:21:11 +10003446 int error;
Darrick J. Wongdc423752016-08-03 11:23:49 +10003447 error = xlog_recover_process_intents(log);
David Chinner3c1e2bb2008-04-10 12:21:11 +10003448 if (error) {
Darrick J. Wongdc423752016-08-03 11:23:49 +10003449 xfs_alert(log->l_mp, "Failed to recover intents");
David Chinner3c1e2bb2008-04-10 12:21:11 +10003450 return error;
3451 }
Darrick J. Wong9e88b5d2016-08-03 12:09:48 +10003452
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 /*
Darrick J. Wongdc423752016-08-03 11:23:49 +10003454 * Sync the log to get all the intents out of the AIL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455 * This isn't absolutely necessary, but it helps in
3456 * case the unlink transactions would have problems
Darrick J. Wongdc423752016-08-03 11:23:49 +10003457 * pushing the intents out of the way.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 */
Christoph Hellwiga14a3482010-01-19 09:56:46 +00003459 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460
Christoph Hellwig42490232008-08-13 16:49:32 +10003461 xlog_recover_process_iunlinks(log);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462
3463 xlog_recover_check_summary(log);
3464
Dave Chinnera0fa2b62011-03-07 10:01:35 +11003465 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3466 log->l_mp->m_logname ? log->l_mp->m_logname
3467 : "internal");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3469 } else {
Dave Chinnera0fa2b62011-03-07 10:01:35 +11003470 xfs_info(log->l_mp, "Ending clean mount");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 }
3472 return 0;
3473}
3474
Hariprasad Kelama7a92502019-07-03 07:34:18 -07003475void
Brian Fosterf0b2efa2015-08-19 09:58:36 +10003476xlog_recover_cancel(
3477 struct xlog *log)
3478{
Brian Fosterf0b2efa2015-08-19 09:58:36 +10003479 if (log->l_flags & XLOG_RECOVERY_NEEDED)
Hariprasad Kelama7a92502019-07-03 07:34:18 -07003480 xlog_recover_cancel_intents(log);
Brian Fosterf0b2efa2015-08-19 09:58:36 +10003481}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482
#if defined(DEBUG)
/*
 * Walk every allocation group and accumulate the free-block and inode
 * counts from the on-disk AGF and AGI headers.  Read failures are
 * reported via xfs_alert() but otherwise ignored so the remaining AGs
 * are still visited.
 *
 * NOTE(review): despite the stated intent, the accumulated totals are
 * never actually compared against the superblock counters here — the
 * sums are computed and then discarded.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_buf		*agfbp;
	struct xfs_buf		*agibp;
	xfs_agnumber_t		agno;
	uint64_t		freeblks = 0;
	uint64_t		itotal = 0;
	uint64_t		ifree = 0;
	int			error;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agf	*agfp = agfbp->b_addr;

			/* free extents plus blocks held in the AGFL */
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = agibp->b_addr;

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif	/* DEBUG */