blob: 782ec3eeab4da48891db8541e2bdd3f5d85abe2f [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Tim Shimmin87c199c2006-06-09 14:56:16 +10003 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
Nathan Scott7b718762005-11-02 14:58:39 +11004 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006#include "xfs.h"
Nathan Scotta844f452005-11-02 14:38:42 +11007#include "xfs_fs.h"
Dave Chinner70a98832013-10-23 10:36:05 +11008#include "xfs_shared.h"
Dave Chinner239880e2013-10-23 10:50:10 +11009#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
Nathan Scotta844f452005-11-02 14:38:42 +110012#include "xfs_bit.h"
Nathan Scotta844f452005-11-02 14:38:42 +110013#include "xfs_sb.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include "xfs_mount.h"
Darrick J. Wong50995582017-11-21 20:53:02 -080015#include "xfs_defer.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include "xfs_inode.h"
Dave Chinner239880e2013-10-23 10:50:10 +110017#include "xfs_trans.h"
Dave Chinner239880e2013-10-23 10:50:10 +110018#include "xfs_log.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include "xfs_log_priv.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include "xfs_log_recover.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include "xfs_trans_priv.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110022#include "xfs_alloc.h"
23#include "xfs_ialloc.h"
Christoph Hellwig0b1b2132009-12-14 23:14:59 +000024#include "xfs_trace.h"
Dave Chinner33479e02012-10-08 21:56:11 +110025#include "xfs_icache.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110026#include "xfs_error.h"
Brian Foster60a4a222016-09-26 08:34:27 +100027#include "xfs_buf_item.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
/*
 * Midpoint of two log block numbers, used when bisecting the log during
 * head/tail searches.  Arguments and the expansion are fully parenthesized
 * so the macro is safe with arbitrary (e.g. ternary) argument expressions.
 */
#define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)

/*
 * Forward declarations for recovery helpers defined later in this file.
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog	*);
#else
/* Summary checking exists only in DEBUG builds; compiles to nothing here. */
#define xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070050/*
51 * Sector aligned buffer routines for buffer create/read/write/access
52 */
53
Alex Elderff30a622010-04-13 15:22:58 +100054/*
Brian Foster99c26592017-10-26 09:31:15 -070055 * Verify the log-relative block number and length in basic blocks are valid for
56 * an operation involving the given XFS log buffer. Returns true if the fields
57 * are valid, false otherwise.
Alex Elderff30a622010-04-13 15:22:58 +100058 */
Brian Foster99c26592017-10-26 09:31:15 -070059static inline bool
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -070060xlog_verify_bno(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -050061 struct xlog *log,
Brian Foster99c26592017-10-26 09:31:15 -070062 xfs_daddr_t blk_no,
Alex Elderff30a622010-04-13 15:22:58 +100063 int bbcount)
64{
Brian Foster99c26592017-10-26 09:31:15 -070065 if (blk_no < 0 || blk_no >= log->l_logBBsize)
66 return false;
67 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
68 return false;
69 return true;
Alex Elderff30a622010-04-13 15:22:58 +100070}
71
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 *
 * Returns a zeroed, DMA-aligned buffer, or NULL if the requested length is
 * invalid or the allocation fails (KM_MAYFAIL).
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer.  If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue.  Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1).  But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
112
Alex Elder48389ef2010-04-20 17:10:21 +1000113/*
114 * Return the address of the start of the given block number's data
115 * in a log buffer. The buffer covers a log sector-aligned region.
116 */
Christoph Hellwig18ffb8c2019-06-28 19:27:26 -0700117static inline unsigned int
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100118xlog_align(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500119 struct xlog *log,
Christoph Hellwig18ffb8c2019-06-28 19:27:26 -0700120 xfs_daddr_t blk_no)
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100121{
Christoph Hellwig18ffb8c2019-06-28 19:27:26 -0700122 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100123}
124
/*
 * Common log I/O path: read or write (op is REQ_OP_READ/REQ_OP_WRITE)
 * nbblks basic blocks starting at log-relative block blk_no into/from the
 * caller-supplied buffer.  The range is widened to log sector alignment
 * before the I/O is issued, so the buffer must have been sized by
 * xlog_alloc_buffer().  Returns 0 on success or a negative errno.
 */
static int
xlog_do_io(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	unsigned int	nbblks,
	char		*data,
	unsigned int	op)
{
	int		error;

	/* Reject ranges that do not lie entirely within the log. */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	/* Widen the request to whole log sectors. */
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	/* Suppress the alert if the filesystem is already shut down. */
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}
156
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100157STATIC int
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700158xlog_bread_noalign(
159 struct xlog *log,
160 xfs_daddr_t blk_no,
161 int nbblks,
162 char *data)
163{
164 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
165}
166
167STATIC int
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100168xlog_bread(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500169 struct xlog *log,
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100170 xfs_daddr_t blk_no,
171 int nbblks,
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700172 char *data,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +1000173 char **offset)
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100174{
175 int error;
176
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700177 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
178 if (!error)
179 *offset = data + xlog_align(log, blk_no);
180 return error;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100181}
182
Christoph Hellwigba0f32d2005-06-21 15:36:52 +1000183STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184xlog_bwrite(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500185 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 xfs_daddr_t blk_no,
187 int nbblks,
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700188 char *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189{
Christoph Hellwig6ad5b322019-06-28 19:27:26 -0700190 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191}
192
#ifdef DEBUG
/*
 * Dump debug superblock and log record information: the uuid/format
 * recorded in the superblock vs. those found in the log record header.
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t	*mp,
	xlog_rec_header_t *head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
/* No-op in non-DEBUG builds. */
#define xlog_header_check_dump(mp, head)
#endif
210
/*
 * Check a log record header for recovery: the log format and filesystem
 * uuid recorded in the header must match this mount.  Returns 0 if the
 * header is usable, -EFSCORRUPTED otherwise.
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t	*mp,
	xlog_rec_header_t *head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}
241
/*
 * Check a log record header's filesystem uuid against this mount.  A null
 * uuid is tolerated (IRIX-written log); a mismatched one is fatal.
 * Returns 0 if the header is acceptable, -EFSCORRUPTED otherwise.
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t	*mp,
	xlog_rec_header_t *head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields.  If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}
267
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 *
 * On entry, first_blk/*last_blk bound the search range and *last_blk is
 * known to have the wanted cycle; on success *last_blk is narrowed to the
 * first block in the range observed to carry that cycle.  buffer is a
 * single-block scratch buffer from xlog_alloc_buffer().  Returns 0 or a
 * negative errno from the read path.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	/* Bisect until the two bounds are adjacent. */
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
308
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 *
 * Returns 0 (with *new_blk set as above), -ENOMEM if no usable buffer
 * could be allocated, or a negative errno from the read path.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	/* Scan the range in bufblks-sized chunks, front to back. */
	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		/* Check the cycle number stamped on each basic block. */
		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kmem_free(buffer);
	return error;
}
373
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 *
 * Returns 0 on success (possibly moving *last_blk back to the record
 * header), 1 if no header was found down to block 0 (caller retries at the
 * physical end of the log), or a negative errno.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	xfs_daddr_t	*last_blk,
	int		extra_bblks)
{
	xfs_daddr_t	i;
	char		*buffer;
	char		*offset = NULL;
	xlog_rec_header_t *head = NULL;
	int		error = 0;
	int		smallmem = 0;	/* 1 => one-block buffer, re-read each block */
	int		num_blks = *last_blk - start_blk;
	int		xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	/*
	 * Try to read the whole range at once; fall back to a single-block
	 * buffer (smallmem mode) if that allocation fails.
	 */
	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		/* Point at the last block of the range; we scan backwards. */
		offset += ((num_blks - 1) << BBSHIFT);
	}

	/* Walk backwards from *last_blk - 1 looking for a record header. */
	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head.  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		/* v2 logs: one extended header per XLOG_HEADER_CYCLE_SIZE. */
		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kmem_free(buffer);
	return error;
}
484
485/*
486 * Head is defined to be the point of the log where the next log write
Zhi Yong Wu0a94da22013-08-07 10:11:08 +0000487 * could go. This means that incomplete LR writes at the end are
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488 * eliminated when calculating the head. We aren't guaranteed that previous
489 * LR have complete transactions. We only know that a cycle number of
490 * current cycle number -1 won't be present in the log if we start writing
491 * from our current block number.
492 *
493 * last_blk contains the block number of the first block with a given
494 * cycle number.
495 *
496 * Return: zero if normal, non-zero if error.
497 */
Christoph Hellwigba0f32d2005-06-21 15:36:52 +1000498STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499xlog_find_head(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -0500500 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 xfs_daddr_t *return_head_blk)
502{
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700503 char *buffer;
Christoph Hellwigb2a922c2015-06-22 09:45:10 +1000504 char *offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
506 int num_scan_bblks;
507 uint first_half_cycle, last_half_cycle;
508 uint stop_on_cycle;
509 int error, log_bbnum = log->l_logBBsize;
510
511 /* Is the end of the log device zeroed? */
Dave Chinner24513372014-06-25 14:58:08 +1000512 error = xlog_find_zeroed(log, &first_blk);
513 if (error < 0) {
514 xfs_warn(log->l_mp, "empty log check failed");
515 return error;
516 }
517 if (error == 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 *return_head_blk = first_blk;
519
520 /* Is the whole lot zeroed? */
521 if (!first_blk) {
522 /* Linux XFS shouldn't generate totally zeroed logs -
523 * mkfs etc write a dummy unmount record to a fresh
524 * log so we can store the uuid in there
525 */
Dave Chinnera0fa2b62011-03-07 10:01:35 +1100526 xfs_warn(log->l_mp, "totally zeroed log");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 }
528
529 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 }
531
532 first_blk = 0; /* get cycle # of 1st block */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700533 buffer = xlog_alloc_buffer(log, 1);
534 if (!buffer)
Dave Chinner24513372014-06-25 14:58:08 +1000535 return -ENOMEM;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100536
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700537 error = xlog_bread(log, 0, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100538 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700539 goto out_free_buffer;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100540
Christoph Hellwig03bea6f2007-10-12 10:58:05 +1000541 first_half_cycle = xlog_get_cycle(offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542
543 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700544 error = xlog_bread(log, last_blk, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100545 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700546 goto out_free_buffer;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +0100547
Christoph Hellwig03bea6f2007-10-12 10:58:05 +1000548 last_half_cycle = xlog_get_cycle(offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 ASSERT(last_half_cycle != 0);
550
551 /*
552 * If the 1st half cycle number is equal to the last half cycle number,
553 * then the entire log is stamped with the same cycle number. In this
554 * case, head_blk can't be set to zero (which makes sense). The below
555 * math doesn't work out properly with head_blk equal to zero. Instead,
556 * we set it to log_bbnum which is an invalid block number, but this
557 * value makes the math correct. If head_blk doesn't changed through
558 * all the tests below, *head_blk is set to zero at the very end rather
559 * than log_bbnum. In a sense, log_bbnum and zero are the same block
560 * in a circular file.
561 */
562 if (first_half_cycle == last_half_cycle) {
563 /*
564 * In this case we believe that the entire log should have
565 * cycle number last_half_cycle. We need to scan backwards
566 * from the end verifying that there are no holes still
567 * containing last_half_cycle - 1. If we find such a hole,
568 * then the start of that hole will be the new head. The
569 * simple case looks like
570 * x | x ... | x - 1 | x
571 * Another case that fits this picture would be
572 * x | x + 1 | x ... | x
Nathan Scottc41564b2006-03-29 08:55:14 +1000573 * In this case the head really is somewhere at the end of the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700574 * log, as one of the latest writes at the beginning was
575 * incomplete.
576 * One more case is
577 * x | x + 1 | x ... | x - 1 | x
578 * This is really the combination of the above two cases, and
579 * the head has to end up at the start of the x-1 hole at the
580 * end of the log.
581 *
582 * In the 256k log case, we will read from the beginning to the
583 * end of the log and search for cycle numbers equal to x-1.
584 * We don't worry about the x+1 blocks that we encounter,
585 * because we know that they cannot be the head since the log
586 * started with x.
587 */
588 head_blk = log_bbnum;
589 stop_on_cycle = last_half_cycle - 1;
590 } else {
591 /*
592 * In this case we want to find the first block with cycle
593 * number matching last_half_cycle. We expect the log to be
594 * some variation on
Alex Elder3f943d82010-04-15 18:17:34 +0000595 * x + 1 ... | x ... | x
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 * The first block with cycle number x (last_half_cycle) will
597 * be where the new head belongs. First we do a binary search
598 * for the first occurrence of last_half_cycle. The binary
599 * search may not be totally accurate, so then we scan back
600 * from there looking for occurrences of last_half_cycle before
601 * us. If that backwards scan wraps around the beginning of
602 * the log, then we look for occurrences of last_half_cycle - 1
603 * at the end of the log. The cases we're looking for look
604 * like
Alex Elder3f943d82010-04-15 18:17:34 +0000605 * v binary search stopped here
606 * x + 1 ... | x | x + 1 | x ... | x
607 * ^ but we want to locate this spot
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 * or
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 * <---------> less than scan distance
Alex Elder3f943d82010-04-15 18:17:34 +0000610 * x + 1 ... | x ... | x - 1 | x
611 * ^ we want to locate this spot
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 */
613 stop_on_cycle = last_half_cycle;
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700614 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
615 last_half_cycle);
616 if (error)
617 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618 }
619
620 /*
621 * Now validate the answer. Scan back some number of maximum possible
622 * blocks and make sure each one has the expected cycle number. The
623 * maximum is determined by the total possible amount of buffering
624 * in the in-core log. The following number can be made tighter if
625 * we actually look at the block size of the filesystem.
626 */
Brian Foster9f2a4502017-10-26 09:31:16 -0700627 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628 if (head_blk >= num_scan_bblks) {
629 /*
630 * We are guaranteed that the entire check can be performed
631 * in one buffer.
632 */
633 start_blk = head_blk - num_scan_bblks;
634 if ((error = xlog_find_verify_cycle(log,
635 start_blk, num_scan_bblks,
636 stop_on_cycle, &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700637 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638 if (new_blk != -1)
639 head_blk = new_blk;
640 } else { /* need to read 2 parts of log */
641 /*
642 * We are going to scan backwards in the log in two parts.
643 * First we scan the physical end of the log. In this part
644 * of the log, we are looking for blocks with cycle number
645 * last_half_cycle - 1.
646 * If we find one, then we know that the log starts there, as
647 * we've found a hole that didn't get written in going around
648 * the end of the physical log. The simple case for this is
649 * x + 1 ... | x ... | x - 1 | x
650 * <---------> less than scan distance
651 * If all of the blocks at the end of the log have cycle number
652 * last_half_cycle, then we check the blocks at the start of
653 * the log looking for occurrences of last_half_cycle. If we
654 * find one, then our current estimate for the location of the
655 * first occurrence of last_half_cycle is wrong and we move
656 * back to the hole we've found. This case looks like
657 * x + 1 ... | x | x + 1 | x ...
658 * ^ binary search stopped here
659 * Another case we need to handle that only occurs in 256k
660 * logs is
661 * x + 1 ... | x ... | x+1 | x ...
662 * ^ binary search stops here
663 * In a 256k log, the scan at the end of the log will see the
664 * x + 1 blocks. We need to skip past those since that is
665 * certainly not the head of the log. By searching for
666 * last_half_cycle-1 we accomplish that.
667 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 ASSERT(head_blk <= INT_MAX &&
Alex Elder3f943d82010-04-15 18:17:34 +0000669 (xfs_daddr_t) num_scan_bblks >= head_blk);
670 start_blk = log_bbnum - (num_scan_bblks - head_blk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 if ((error = xlog_find_verify_cycle(log, start_blk,
672 num_scan_bblks - (int)head_blk,
673 (stop_on_cycle - 1), &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700674 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675 if (new_blk != -1) {
676 head_blk = new_blk;
Alex Elder9db127e2010-04-15 18:17:26 +0000677 goto validate_head;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 }
679
680 /*
681 * Scan beginning of log now. The last part of the physical
682 * log is good. This scan needs to verify that it doesn't find
683 * the last_half_cycle.
684 */
685 start_blk = 0;
686 ASSERT(head_blk <= INT_MAX);
687 if ((error = xlog_find_verify_cycle(log,
688 start_blk, (int)head_blk,
689 stop_on_cycle, &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700690 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 if (new_blk != -1)
692 head_blk = new_blk;
693 }
694
Alex Elder9db127e2010-04-15 18:17:26 +0000695validate_head:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 /*
697 * Now we need to make sure head_blk is not pointing to a block in
698 * the middle of a log record.
699 */
700 num_scan_bblks = XLOG_REC_SHIFT(log);
701 if (head_blk >= num_scan_bblks) {
702 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
703
704 /* start ptr at last block ptr before head_blk */
Dave Chinner24513372014-06-25 14:58:08 +1000705 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
706 if (error == 1)
707 error = -EIO;
708 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700709 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710 } else {
711 start_blk = 0;
712 ASSERT(head_blk <= INT_MAX);
Dave Chinner24513372014-06-25 14:58:08 +1000713 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
714 if (error < 0)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700715 goto out_free_buffer;
Dave Chinner24513372014-06-25 14:58:08 +1000716 if (error == 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717 /* We hit the beginning of the log during our search */
Alex Elder3f943d82010-04-15 18:17:34 +0000718 start_blk = log_bbnum - (num_scan_bblks - head_blk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719 new_blk = log_bbnum;
720 ASSERT(start_blk <= INT_MAX &&
721 (xfs_daddr_t) log_bbnum-start_blk >= 0);
722 ASSERT(head_blk <= INT_MAX);
Dave Chinner24513372014-06-25 14:58:08 +1000723 error = xlog_find_verify_log_record(log, start_blk,
724 &new_blk, (int)head_blk);
725 if (error == 1)
726 error = -EIO;
727 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700728 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 if (new_blk != log_bbnum)
730 head_blk = new_blk;
731 } else if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700732 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733 }
734
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700735 kmem_free(buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736 if (head_blk == log_bbnum)
737 *return_head_blk = 0;
738 else
739 *return_head_blk = head_blk;
740 /*
741 * When returning here, we have a good block number. Bad block
742 * means that during a previous crash, we didn't have a clean break
743 * from cycle number N to cycle number N-1. In this case, we need
744 * to find the first block with cycle number N-1.
745 */
746 return 0;
747
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700748out_free_buffer:
749 kmem_free(buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 if (error)
Dave Chinnera0fa2b62011-03-07 10:01:35 +1100751 xfs_warn(log->l_mp, "failed to find log head");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 return error;
753}
754
755/*
Brian Fostereed6b462016-01-04 15:55:10 +1100756 * Seek backwards in the log for log record headers.
757 *
758 * Given a starting log block, walk backwards until we find the provided number
759 * of records or hit the provided tail block. The return value is the number of
760 * records encountered or a negative error code. The log block and buffer
761 * pointer of the last record seen are returned in rblk and rhead respectively.
762 */
763STATIC int
764xlog_rseek_logrec_hdr(
765 struct xlog *log,
766 xfs_daddr_t head_blk,
767 xfs_daddr_t tail_blk,
768 int count,
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700769 char *buffer,
Brian Fostereed6b462016-01-04 15:55:10 +1100770 xfs_daddr_t *rblk,
771 struct xlog_rec_header **rhead,
772 bool *wrapped)
773{
774 int i;
775 int error;
776 int found = 0;
777 char *offset = NULL;
778 xfs_daddr_t end_blk;
779
780 *wrapped = false;
781
782 /*
783 * Walk backwards from the head block until we hit the tail or the first
784 * block in the log.
785 */
786 end_blk = head_blk > tail_blk ? tail_blk : 0;
787 for (i = (int) head_blk - 1; i >= end_blk; i--) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700788 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Fostereed6b462016-01-04 15:55:10 +1100789 if (error)
790 goto out_error;
791
792 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
793 *rblk = i;
794 *rhead = (struct xlog_rec_header *) offset;
795 if (++found == count)
796 break;
797 }
798 }
799
800 /*
801 * If we haven't hit the tail block or the log record header count,
802 * start looking again from the end of the physical log. Note that
803 * callers can pass head == tail if the tail is not yet known.
804 */
805 if (tail_blk >= head_blk && found != count) {
806 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700807 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Fostereed6b462016-01-04 15:55:10 +1100808 if (error)
809 goto out_error;
810
811 if (*(__be32 *)offset ==
812 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
813 *wrapped = true;
814 *rblk = i;
815 *rhead = (struct xlog_rec_header *) offset;
816 if (++found == count)
817 break;
818 }
819 }
820 }
821
822 return found;
823
824out_error:
825 return error;
826}
827
828/*
Brian Foster7088c412016-01-05 07:40:16 +1100829 * Seek forward in the log for log record headers.
830 *
831 * Given head and tail blocks, walk forward from the tail block until we find
832 * the provided number of records or hit the head block. The return value is the
833 * number of records encountered or a negative error code. The log block and
834 * buffer pointer of the last record seen are returned in rblk and rhead
835 * respectively.
836 */
837STATIC int
838xlog_seek_logrec_hdr(
839 struct xlog *log,
840 xfs_daddr_t head_blk,
841 xfs_daddr_t tail_blk,
842 int count,
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700843 char *buffer,
Brian Foster7088c412016-01-05 07:40:16 +1100844 xfs_daddr_t *rblk,
845 struct xlog_rec_header **rhead,
846 bool *wrapped)
847{
848 int i;
849 int error;
850 int found = 0;
851 char *offset = NULL;
852 xfs_daddr_t end_blk;
853
854 *wrapped = false;
855
856 /*
857 * Walk forward from the tail block until we hit the head or the last
858 * block in the log.
859 */
860 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
861 for (i = (int) tail_blk; i <= end_blk; i++) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700862 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Foster7088c412016-01-05 07:40:16 +1100863 if (error)
864 goto out_error;
865
866 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
867 *rblk = i;
868 *rhead = (struct xlog_rec_header *) offset;
869 if (++found == count)
870 break;
871 }
872 }
873
874 /*
875 * If we haven't hit the head block or the log record header count,
876 * start looking again from the start of the physical log.
877 */
878 if (tail_blk > head_blk && found != count) {
879 for (i = 0; i < (int) head_blk; i++) {
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -0700880 error = xlog_bread(log, i, 1, buffer, &offset);
Brian Foster7088c412016-01-05 07:40:16 +1100881 if (error)
882 goto out_error;
883
884 if (*(__be32 *)offset ==
885 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
886 *wrapped = true;
887 *rblk = i;
888 *rhead = (struct xlog_rec_header *) offset;
889 if (++found == count)
890 break;
891 }
892 }
893 }
894
895 return found;
896
897out_error:
898 return error;
899}
900
901/*
Brian Foster4a4f66e2017-08-08 18:21:52 -0700902 * Calculate distance from head to tail (i.e., unused space in the log).
903 */
904static inline int
905xlog_tail_distance(
906 struct xlog *log,
907 xfs_daddr_t head_blk,
908 xfs_daddr_t tail_blk)
909{
910 if (head_blk < tail_blk)
911 return tail_blk - head_blk;
912
913 return tail_blk + (log->l_logBBsize - head_blk);
914}
915
/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 *
 * Returns 0 on success (with *tail_blk possibly moved forward to a verified
 * record) or a negative error code.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,	/* verified head block */
	xfs_daddr_t		*tail_blk,	/* in/out: unverified tail */
	int			hsize)		/* iclog header size, bytes */
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	/* single-block scratch buffer for record header reads */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		/* retry the CRC pass with the tail advanced past the damage */
		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kmem_free(buffer);
	return error;
}
1006
/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 *
 * Returns 0 on success (head/tail possibly trimmed) or a negative error code.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kmem_free(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	/* the (possibly trimmed) head is good; now verify the tail side */
	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}
1108
1109/*
Dave Chinner0703a8e2018-06-08 09:54:22 -07001110 * We need to make sure we handle log wrapping properly, so we can't use the
1111 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1112 * log.
1113 *
1114 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1115 * operation here and cast it back to a 64 bit daddr on return.
1116 */
1117static inline xfs_daddr_t
1118xlog_wrap_logbno(
1119 struct xlog *log,
1120 xfs_daddr_t bno)
1121{
1122 int mod;
1123
1124 div_s64_rem(bno, log->l_logBBsize, &mod);
1125 return mod;
1126}
1127
1128/*
Brian Foster65b99a02016-03-07 08:22:22 +11001129 * Check whether the head of the log points to an unmount record. In other
1130 * words, determine whether the log is clean. If so, update the in-core state
1131 * appropriately.
1132 */
1133static int
1134xlog_check_unmount_rec(
1135 struct xlog *log,
1136 xfs_daddr_t *head_blk,
1137 xfs_daddr_t *tail_blk,
1138 struct xlog_rec_header *rhead,
1139 xfs_daddr_t rhead_blk,
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001140 char *buffer,
Brian Foster65b99a02016-03-07 08:22:22 +11001141 bool *clean)
1142{
1143 struct xlog_op_header *op_head;
1144 xfs_daddr_t umount_data_blk;
1145 xfs_daddr_t after_umount_blk;
1146 int hblks;
1147 int error;
1148 char *offset;
1149
1150 *clean = false;
1151
1152 /*
1153 * Look for unmount record. If we find it, then we know there was a
1154 * clean unmount. Since 'i' could be the last block in the physical
1155 * log, we convert to a log block before comparing to the head_blk.
1156 *
1157 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1158 * below. We won't want to clear the unmount record if there is one, so
1159 * we pass the lsn of the unmount record rather than the block after it.
1160 */
1161 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1162 int h_size = be32_to_cpu(rhead->h_size);
1163 int h_version = be32_to_cpu(rhead->h_version);
1164
1165 if ((h_version & XLOG_VERSION_2) &&
1166 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1167 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1168 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1169 hblks++;
1170 } else {
1171 hblks = 1;
1172 }
1173 } else {
1174 hblks = 1;
1175 }
Dave Chinner0703a8e2018-06-08 09:54:22 -07001176
1177 after_umount_blk = xlog_wrap_logbno(log,
1178 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1179
Brian Foster65b99a02016-03-07 08:22:22 +11001180 if (*head_blk == after_umount_blk &&
1181 be32_to_cpu(rhead->h_num_logops) == 1) {
Dave Chinner0703a8e2018-06-08 09:54:22 -07001182 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001183 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
Brian Foster65b99a02016-03-07 08:22:22 +11001184 if (error)
1185 return error;
1186
1187 op_head = (struct xlog_op_header *)offset;
1188 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1189 /*
1190 * Set tail and last sync so that newly written log
1191 * records will point recovery to after the current
1192 * unmount record.
1193 */
1194 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1195 log->l_curr_cycle, after_umount_blk);
1196 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1197 log->l_curr_cycle, after_umount_blk);
1198 *tail_blk = after_umount_blk;
1199
1200 *clean = true;
1201 }
1202 }
1203
1204 return 0;
1205}
1206
Brian Foster717bc0e2016-03-07 08:22:22 +11001207static void
1208xlog_set_state(
1209 struct xlog *log,
1210 xfs_daddr_t head_blk,
1211 struct xlog_rec_header *rhead,
1212 xfs_daddr_t rhead_blk,
1213 bool bump_cycle)
1214{
1215 /*
1216 * Reset log values according to the state of the log when we
1217 * crashed. In the case where head_blk == 0, we bump curr_cycle
1218 * one because the next write starts a new cycle rather than
1219 * continuing the cycle of the last good log record. At this
1220 * point we have guaranteed that all partial log records have been
1221 * accounted for. Therefore, we know that the last good log record
1222 * written was complete and ended exactly on the end boundary
1223 * of the physical log.
1224 */
1225 log->l_prev_block = rhead_blk;
1226 log->l_curr_block = (int)head_blk;
1227 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1228 if (bump_cycle)
1229 log->l_curr_cycle++;
1230 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1231 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1232 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1233 BBTOB(log->l_curr_block));
1234 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1235 BBTOB(log->l_curr_block));
1236}
1237
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk. Every log record header has
 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
 * to get a sync block number. The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn. The entire log record does not need to be valid. We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 *
 * On success, *head_blk and *tail_blk hold the verified head and tail of
 * the log and the in-core log state has been reset to match; returns a
 * negative error code otherwise.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	/* single-block scratch buffer used for all reads below */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {			/* special case */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		/* a zero cycle in block 0 means a freshly-made, empty log */
		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			/* the trimmed head may now sit on an unmount record */
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kmem_free(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
1390
1391/*
1392 * Is the log zeroed at all?
1393 *
1394 * The last binary search should be changed to perform an X block read
1395 * once X becomes small enough. You can then search linearly through
1396 * the X blocks. This will cut down on the number of reads we need to do.
1397 *
1398 * If the log is partially zeroed, this routine will pass back the blkno
1399 * of the first block with cycle number 0. It won't have a complete LR
1400 * preceding it.
1401 *
1402 * Return:
1403 * 0 => the log is completely written to
Dave Chinner24513372014-06-25 14:58:08 +10001404 * 1 => use *blk_no as the first block of the log
1405 * <0 => error has occurred
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 */
David Chinnera8272ce2007-11-23 16:28:09 +11001407STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408xlog_find_zeroed(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05001409 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 xfs_daddr_t *blk_no)
1411{
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001412 char *buffer;
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10001413 char *offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 uint first_cycle, last_cycle;
1415 xfs_daddr_t new_blk, last_blk, start_blk;
1416 xfs_daddr_t num_scan_bblks;
1417 int error, log_bbnum = log->l_logBBsize;
1418
Nathan Scott6fdf8cc2006-06-28 10:13:52 +10001419 *blk_no = 0;
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 /* check totally zeroed log */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001422 buffer = xlog_alloc_buffer(log, 1);
1423 if (!buffer)
Dave Chinner24513372014-06-25 14:58:08 +10001424 return -ENOMEM;
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001425 error = xlog_bread(log, 0, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +01001426 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001427 goto out_free_buffer;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +01001428
Christoph Hellwig03bea6f2007-10-12 10:58:05 +10001429 first_cycle = xlog_get_cycle(offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 if (first_cycle == 0) { /* completely zeroed log */
1431 *blk_no = 0;
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001432 kmem_free(buffer);
Dave Chinner24513372014-06-25 14:58:08 +10001433 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 }
1435
1436 /* check partially zeroed log */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001437 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
Christoph Hellwig076e6ac2009-03-16 08:24:13 +01001438 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001439 goto out_free_buffer;
Christoph Hellwig076e6ac2009-03-16 08:24:13 +01001440
Christoph Hellwig03bea6f2007-10-12 10:58:05 +10001441 last_cycle = xlog_get_cycle(offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 if (last_cycle != 0) { /* log completely written to */
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001443 kmem_free(buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 }
1446
1447 /* we have a partially zeroed log */
1448 last_blk = log_bbnum-1;
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001449 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1450 if (error)
1451 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453 /*
1454 * Validate the answer. Because there is no way to guarantee that
1455 * the entire log is made up of log records which are the same size,
1456 * we scan over the defined maximum blocks. At this point, the maximum
1457 * is not chosen to mean anything special. XXXmiken
1458 */
1459 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1460 ASSERT(num_scan_bblks <= INT_MAX);
1461
1462 if (last_blk < num_scan_bblks)
1463 num_scan_bblks = last_blk;
1464 start_blk = last_blk - num_scan_bblks;
1465
1466 /*
1467 * We search for any instances of cycle number 0 that occur before
1468 * our current estimate of the head. What we're trying to detect is
1469 * 1 ... | 0 | 1 | 0...
1470 * ^ binary search ends here
1471 */
1472 if ((error = xlog_find_verify_cycle(log, start_blk,
1473 (int)num_scan_bblks, 0, &new_blk)))
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001474 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 if (new_blk != -1)
1476 last_blk = new_blk;
1477
1478 /*
1479 * Potentially backup over partial log record write. We don't need
1480 * to search the end of the log because we know it is zero.
1481 */
Dave Chinner24513372014-06-25 14:58:08 +10001482 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1483 if (error == 1)
1484 error = -EIO;
1485 if (error)
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001486 goto out_free_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
1488 *blk_no = last_blk;
Christoph Hellwig6e9b3dd2019-06-28 19:27:27 -07001489out_free_buffer:
1490 kmem_free(buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 if (error)
1492 return error;
Dave Chinner24513372014-06-25 14:58:08 +10001493 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494}
1495
1496/*
1497 * These are simple subroutines used by xlog_clear_stale_blocks() below
1498 * to initialize a buffer full of empty log record headers and write
1499 * them into the log.
1500 */
1501STATIC void
1502xlog_add_record(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05001503 struct xlog *log,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10001504 char *buf,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 int cycle,
1506 int block,
1507 int tail_cycle,
1508 int tail_block)
1509{
1510 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1511
1512 memset(buf, 0, BBSIZE);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10001513 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1514 recp->h_cycle = cpu_to_be32(cycle);
1515 recp->h_version = cpu_to_be32(
Eric Sandeen62118702008-03-06 13:44:28 +11001516 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10001517 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1518 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1519 recp->h_fmt = cpu_to_be32(XLOG_FMT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1521}
1522
/*
 * Overwrite the basic blocks [start_block, start_block + blocks) with empty
 * log record headers carrying @cycle and the given tail cycle/block.  The
 * device may have a sector size larger than a basic block, so partial
 * sectors at either end of the range are read in first and only whole
 * sectors are ever written back.
 *
 * Returns 0 on success or a negative errno.
 */
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;		/* sector-aligned start/end */
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/*
	 * We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		/* skip over the blocks of the sector already on disk */
		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/*
		 * We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;

		}

		/* stamp a record header into each basic block of this chunk */
		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;		/* only the first chunk has a partial head */
	}

out_free_buffer:
	kmem_free(buffer);
	return error;
}
1605
1606/*
1607 * This routine is called to blow away any incomplete log writes out
1608 * in front of the log head. We do this so that we won't become confused
1609 * if we come up, write only a little bit more, and then crash again.
1610 * If we leave the partial log records out there, this situation could
1611 * cause us to think those partial writes are valid blocks since they
1612 * have the current cycle number. We get rid of them by overwriting them
1613 * with empty log records with the old cycle number rather than the
1614 * current one.
1615 *
1616 * The tail lsn is passed in rather than taken from
1617 * the log so that we will not write over the unmount record after a
1618 * clean unmount in a 512 block log. Doing so would leave the log without
1619 * any valid log records in it until a new one was written. If we crashed
1620 * during that time we would not be able to recover.
1621 */
/*
 * Overwrite any stale blocks between the recovered log head and the log
 * tail with empty log records stamped with the previous cycle number, so
 * that partial writes from before the crash are not mistaken for valid
 * records on the current cycle.  See the comment block above for the full
 * rationale.
 *
 * Returns 0 on success, -EFSCORRUPTED if the head/tail geometry is
 * inconsistent, or a negative errno from the write path.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
1735
/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
	struct xlog		*log,
	unsigned short		intent_type,
	uint64_t		intent_id)
{
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp = log->l_ailp;

	/* walk the AIL from the start looking for a matching intent item */
	spin_lock(&ailp->ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		/*
		 * Drop the AIL lock around ->iop_release.
		 * NOTE(review): presumably the cursor keeps our traversal
		 * position valid across the unlock - confirm against the
		 * AIL cursor implementation.  Only the first match is
		 * released; we stop searching after it.
		 */
		spin_unlock(&ailp->ail_lock);
		lip->li_ops->iop_release(lip);
		spin_lock(&ailp->ail_lock);
		break;
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768/******************************************************************************
1769 *
1770 * Log recover routines
1771 *
1772 ******************************************************************************
1773 */
/*
 * Table of every log item type that recovery knows how to replay.  Items
 * whose type is not listed here cause xlog_find_item_ops() to return NULL,
 * which recovery treats as corruption.
 */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	&xlog_buf_item_ops,
	&xlog_inode_item_ops,
	&xlog_dquot_item_ops,
	&xlog_quotaoff_item_ops,
	&xlog_icreate_item_ops,
	&xlog_efi_item_ops,
	&xlog_efd_item_ops,
	&xlog_rui_item_ops,
	&xlog_rud_item_ops,
	&xlog_cui_item_ops,
	&xlog_cud_item_ops,
	&xlog_bui_item_ops,
	&xlog_bud_item_ops,
};
1789
1790static const struct xlog_recover_item_ops *
1791xlog_find_item_ops(
1792 struct xlog_recover_item *item)
1793{
1794 unsigned int i;
1795
1796 for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1797 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1798 return xlog_recover_item_ops[i];
1799
1800 return NULL;
1801}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
Dave Chinnerf0a76952010-01-11 11:49:57 +00001803/*
Dave Chinnera775ad72013-06-05 12:09:07 +10001804 * Sort the log items in the transaction.
1805 *
1806 * The ordering constraints are defined by the inode allocation and unlink
1807 * behaviour. The rules are:
1808 *
1809 * 1. Every item is only logged once in a given transaction. Hence it
1810 * represents the last logged state of the item. Hence ordering is
1811 * dependent on the order in which operations need to be performed so
1812 * required initial conditions are always met.
1813 *
1814 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1815 * there's nothing to replay from them so we can simply cull them
1816 * from the transaction. However, we can't do that until after we've
1817 * replayed all the other items because they may be dependent on the
1818 * cancelled buffer and replaying the cancelled buffer can remove it
1819 * form the cancelled buffer table. Hence they have tobe done last.
1820 *
1821 * 3. Inode allocation buffers must be replayed before inode items that
Dave Chinner28c8e412013-06-27 16:04:55 +10001822 * read the buffer and replay changes into it. For filesystems using the
1823 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1824 * treated the same as inode allocation buffers as they create and
1825 * initialise the buffers directly.
Dave Chinnera775ad72013-06-05 12:09:07 +10001826 *
1827 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1828 * This ensures that inodes are completely flushed to the inode buffer
1829 * in a "free" state before we remove the unlinked inode list pointer.
1830 *
1831 * Hence the ordering needs to be inode allocation buffers first, inode items
1832 * second, inode unlink buffers third and cancelled buffers last.
1833 *
1834 * But there's a problem with that - we can't tell an inode allocation buffer
1835 * apart from a regular buffer, so we can't separate them. We can, however,
1836 * tell an inode unlink buffer from the others, and so we can separate them out
1837 * from all the other buffers and move them to last.
1838 *
1839 * Hence, 4 lists, in order from head to tail:
Dave Chinner28c8e412013-06-27 16:04:55 +10001840 * - buffer_list for all buffers except cancelled/inode unlink buffers
1841 * - item_list for all non-buffer items
1842 * - inode_buffer_list for inode unlink buffers
1843 * - cancel_list for the cancelled buffers
1844 *
1845 * Note that we add objects to the tail of the lists so that first-to-last
1846 * ordering is preserved within the lists. Adding objects to the head of the
1847 * list means when we traverse from the head we walk them in last-to-first
1848 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1849 * but for all other items there may be specific ordering that we need to
1850 * preserve.
Dave Chinnerf0a76952010-01-11 11:49:57 +00001851 */
/*
 * Sort the transaction's items into replay order; see the ordering rules in
 * the comment block above.  On an unrecognized item type the remaining
 * unsorted items are returned to trans->r_itemq so the caller can free them,
 * and -EFSCORRUPTED is returned.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	struct xlog_recover_item *item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(item_list);

	/* move everything onto sort_list, then deal items onto the 4 lists */
	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;

		item->ri_ops = xlog_find_item_ops(item);
		if (!item->ri_ops) {
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation (%d)",
				__func__, ITEM_TYPE(item));
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EFSCORRUPTED;
			break;
		}

		/* items without a reorder method default to the item list */
		if (item->ri_ops->reorder)
			fate = item->ri_ops->reorder(item);

		switch (fate) {
		case XLOG_REORDER_BUFFER_LIST:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XLOG_REORDER_CANCEL_LIST:
			trace_xfs_log_recover_item_reorder_head(log,
					trans, item, pass);
			list_move(&item->ri_list, &cancel_list);
			break;
		case XLOG_REORDER_INODE_BUFFER_LIST:
			list_move(&item->ri_list, &inode_buffer_list);
			break;
		case XLOG_REORDER_ITEM_LIST:
			trace_xfs_log_recover_item_reorder_tail(log,
					trans, item, pass);
			list_move_tail(&item->ri_list, &item_list);
			break;
		}
	}

	/*
	 * Rebuild r_itemq in replay order: buffers first, then non-buffer
	 * items, then inode unlink buffers, then cancelled buffers last.
	 */
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&item_list))
		list_splice_tail(&item_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
1920
Darrick J. Wong8ea56822020-05-01 16:00:46 -07001921void
Christoph Hellwig7d4894b2020-04-27 18:23:17 -07001922xlog_buf_readahead(
1923 struct xlog *log,
1924 xfs_daddr_t blkno,
1925 uint len,
1926 const struct xfs_buf_ops *ops)
1927{
1928 if (!xlog_is_buffer_cancelled(log, blkno, len))
1929 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1930}
1931
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932STATIC int
Zhi Yong Wu00574da2013-08-14 15:16:03 +08001933xlog_recover_items_pass2(
1934 struct xlog *log,
1935 struct xlog_recover *trans,
1936 struct list_head *buffer_list,
1937 struct list_head *item_list)
1938{
1939 struct xlog_recover_item *item;
1940 int error = 0;
1941
1942 list_for_each_entry(item, item_list, ri_list) {
Darrick J. Wong2565a112020-05-01 16:00:50 -07001943 trace_xfs_log_recover_item_recover(log, trans, item,
1944 XLOG_RECOVER_PASS2);
1945
1946 if (item->ri_ops->commit_pass2)
1947 error = item->ri_ops->commit_pass2(log, buffer_list,
1948 item, trans->r_lsn);
Zhi Yong Wu00574da2013-08-14 15:16:03 +08001949 if (error)
1950 return error;
1951 }
1952
1953 return error;
1954}
1955
Christoph Hellwigd0450942010-12-01 22:06:23 +00001956/*
1957 * Perform the transaction.
1958 *
1959 * If the transaction modifies a buffer or inode, do it now. Otherwise,
1960 * EFIs and EFDs get queued up by adding entries into the AIL for them.
1961 */
/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 *
 * In pass 2, items are batched onto ra_list (issuing readahead as they are
 * queued) and replayed XLOG_RECOVER_COMMIT_QUEUE_MAX at a time so the
 * readahead I/O can overlap with replay work.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	int			items_queued = 0;
	struct xlog_recover_item *item;
	struct xlog_recover_item *next;
	LIST_HEAD		(ra_list);
	LIST_HEAD		(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	/* the transaction is committed - remove it from the hash of live ones */
	hlist_del_init(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item, pass);

		switch (pass) {
		case XLOG_RECOVER_PASS1:
			if (item->ri_ops->commit_pass1)
				error = item->ri_ops->commit_pass1(log, item);
			break;
		case XLOG_RECOVER_PASS2:
			/* kick off readahead, then batch the item for replay */
			if (item->ri_ops->ra_pass2)
				item->ri_ops->ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	/*
	 * Flush any partially-filled batch; on error, skip the replay and
	 * just move the batched items to done_list so they are returned to
	 * the transaction below for the caller to free.
	 */
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}
2026
Dave Chinner76560662014-09-29 09:45:42 +10002027STATIC void
2028xlog_recover_add_item(
2029 struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030{
Darrick J. Wong35f45212020-04-30 10:45:41 -07002031 struct xlog_recover_item *item;
Dave Chinner76560662014-09-29 09:45:42 +10002032
Darrick J. Wong35f45212020-04-30 10:45:41 -07002033 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
Dave Chinner76560662014-09-29 09:45:42 +10002034 INIT_LIST_HEAD(&item->ri_list);
2035 list_add_tail(&item->ri_list, head);
2036}
2037
/*
 * Add the continuation region @dp/@len to the tail item of @trans, growing
 * that item's last region buffer.  If the transaction has no items yet, the
 * bytes are the remainder of a transaction header that was split across log
 * records and are copied into the tail of trans->r_theader instead.
 *
 * Returns 0 on success or -EFSCORRUPTED if @len exceeds the header size.
 */
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xlog_recover_item *item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EFSCORRUPTED;
		}

		xlog_recover_add_item(&trans->r_itemq);
		/*
		 * The first (header_size - len) bytes arrived with the
		 * previous record; copy these bytes into the tail.
		 */
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	/* grow the last region's buffer and append the new bytes to it */
	ptr = krealloc(old_ptr, len + old_len, GFP_KERNEL | __GFP_NOFAIL);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
2081
2082/*
Dave Chinner76560662014-09-29 09:45:42 +10002083 * The next region to add is the start of a new region. It could be
2084 * a whole region or it could be the first part of a new region. Because
2085 * of this, the assumption here is that the type and size fields of all
2086 * format structures fit into the first 32 bits of the structure.
2087 *
2088 * This works because all regions must be 32 bit aligned. Therefore, we
2089 * either have both fields or we have neither field. In the case we have
2090 * neither field, the data part of the region is zero length. We only have
2091 * a log_op_header and can throw away the header since a new one will appear
2092 * later. If we have at least 4 bytes, then we can determine how many regions
2093 * will appear in the current log item.
2094 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xfs_inode_log_format	*in_f;			/* any will do */
	struct xlog_recover_item *item;
	char			*ptr;

	/* Zero-length region: only an ophdr was logged, nothing to record. */
	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/*
		 * First region of the transaction must be the transaction
		 * header; we need to catch log corruptions here.
		 */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	/*
	 * NOTE(review): no NULL check — presumably kmem_alloc(..., 0) blocks
	 * until the allocation succeeds; confirm against the kmem wrappers.
	 */
	ptr = kmem_alloc(len, 0);
	memcpy(ptr, dp, len);
	/*
	 * Per the comment above this function, the type and size fields of
	 * every log format structure live in the first 32 bits, so any format
	 * type works for reading ilf_size below.
	 */
	in_f = (struct xfs_inode_log_format *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					struct xlog_recover_item, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		/* Bounds-check the region count taken from the log. */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EFSCORRUPTED;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    0);
	}

	/* Guard against a corrupt log overflowing the region array. */
	if (item->ri_total <= item->ri_cnt) {
		xfs_warn(log->l_mp,
			"log item region count (%d) overflowed size (%d)",
			item->ri_cnt, item->ri_total);
		ASSERT(0);
		kmem_free(ptr);
		return -EFSCORRUPTED;
	}

	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
Dave Chinnerb818cca2014-09-29 09:45:54 +10002182
Dave Chinner76560662014-09-29 09:45:42 +10002183/*
2184 * Free up any resources allocated by the transaction
2185 *
2186 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2187 */
2188STATIC void
2189xlog_recover_free_trans(
2190 struct xlog_recover *trans)
2191{
Darrick J. Wong35f45212020-04-30 10:45:41 -07002192 struct xlog_recover_item *item, *n;
Dave Chinner76560662014-09-29 09:45:42 +10002193 int i;
2194
Brian Foster39775432017-06-24 10:11:41 -07002195 hlist_del_init(&trans->r_list);
2196
Dave Chinner76560662014-09-29 09:45:42 +10002197 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2198 /* Free the regions in the item. */
2199 list_del(&item->ri_list);
2200 for (i = 0; i < item->ri_cnt; i++)
2201 kmem_free(item->ri_buf[i].i_addr);
2202 /* Free the item itself */
2203 kmem_free(item->ri_buf);
2204 kmem_free(item);
2205 }
2206 /* Free the transaction recover structure */
2207 kmem_free(trans);
2208}
2209
/*
 * Dispatch one log op's payload to the recovery machinery based on the ophdr
 * flags: accumulate data into the transaction, or commit/skip it.
 *
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and it's result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass,
						  buffer_list);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		/* a start flag should have been consumed by ophdr lookup */
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EFSCORRUPTED;
		break;
	}
	/* Any error terminates this transaction, so release it here too. */
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
2268
/*
 * Lookup the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
 *
 * A NULL return means the caller has nothing to add for this ophdr (either it
 * is slack space, or a freshly allocated transaction whose items arrive in
 * later ophdrs).
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	/* A start record carries no payload of its own. */
	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}
2319
/*
 * Validate a single log op header, find (or create) the transaction it belongs
 * to, and feed its payload to the transaction recovery machinery.
 *
 * Returns 0 on success or when the ophdr can simply be skipped; a negative
 * errno aborts recovery.
 */
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain,
	 * i.e. the payload does not run past the end of the record.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	/*
	 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required because:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
	 * - Log recovery skips items with a metadata LSN >= the current LSN of
	 *   the recovery item.
	 * - Separate recovery items against the same metadata buffer can share
	 *   a current LSN. I.e., consider that the LSN of a recovery item is
	 *   defined as the starting LSN of the first record in which its
	 *   transaction appears, that a record can hold multiple transactions,
	 *   and/or that a transaction can span multiple records.
	 *
	 * In other words, we are allowed to submit a buffer from log recovery
	 * once per current LSN. Otherwise, we may incorrectly skip recovery
	 * items and cause corruption.
	 *
	 * We don't know up front whether buffers are updated multiple times per
	 * LSN. Therefore, track the current LSN of each commit log record as it
	 * is processed and drain the queue when it changes. Use commit records
	 * because they are ordered correctly by the logging code.
	 */
	if (log->l_recovery_lsn != trans->r_lsn &&
	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass, buffer_list);
}
2394
/*
 * There are two valid states of the r_state field.  0 indicates that the
 * transaction structure is in a normal state. We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation. If the last operation we added to the transaction was a
 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	trace_xfs_log_recover_record(log, rhead, pass);
	/* Walk every op header packed into this log record. */
	while ((dp < end) && num_logops) {

		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		ASSERT(dp <= end);

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass, buffer_list);
		if (error)
			return error;

		/* advance past this op's payload to the next ophdr */
		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
2443
Darrick J. Wong50995582017-11-21 20:53:02 -08002444/* Take all the collected deferred ops and finish them in order. */
2445static int
2446xlog_finish_defer_ops(
Brian Fosterfbfa9772018-08-01 07:20:29 -07002447 struct xfs_trans *parent_tp)
Darrick J. Wong50995582017-11-21 20:53:02 -08002448{
Brian Fosterfbfa9772018-08-01 07:20:29 -07002449 struct xfs_mount *mp = parent_tp->t_mountp;
Darrick J. Wong50995582017-11-21 20:53:02 -08002450 struct xfs_trans *tp;
2451 int64_t freeblks;
2452 uint resblks;
2453 int error;
2454
2455 /*
2456 * We're finishing the defer_ops that accumulated as a result of
2457 * recovering unfinished intent items during log recovery. We
2458 * reserve an itruncate transaction because it is the largest
2459 * permanent transaction type. Since we're the only user of the fs
2460 * right now, take 93% (15/16) of the available free blocks. Use
2461 * weird math to avoid a 64-bit division.
2462 */
2463 freeblks = percpu_counter_sum(&mp->m_fdblocks);
2464 if (freeblks <= 0)
2465 return -ENOSPC;
2466 resblks = min_t(int64_t, UINT_MAX, freeblks);
2467 resblks = (resblks * 15) >> 4;
2468 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
2469 0, XFS_TRANS_RESERVE, &tp);
2470 if (error)
2471 return error;
Brian Foster91ef75b2018-07-24 13:43:13 -07002472 /* transfer all collected dfops to this transaction */
Brian Fosterce356d62018-08-01 07:20:30 -07002473 xfs_defer_move(tp, parent_tp);
Darrick J. Wong50995582017-11-21 20:53:02 -08002474
Darrick J. Wong50995582017-11-21 20:53:02 -08002475 return xfs_trans_commit(tp);
Darrick J. Wong50995582017-11-21 20:53:02 -08002476}
2477
Darrick J. Wongbba7b162020-05-06 12:07:25 -07002478/* Is this log item a deferred action intent? */
2479static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
2480{
Darrick J. Wong154c7332020-05-01 16:00:54 -07002481 return lip->li_ops->iop_recover != NULL &&
2482 lip->li_ops->iop_match != NULL;
Darrick J. Wongbba7b162020-05-06 12:07:25 -07002483}
2484
/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL. What we do now
 * is update the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they
 * will be removed at some point after the commit. This prevents us
 * from just walking down the list processing each one. We'll use a
 * flag in the intent item to skip those that we've already processed
 * and use the AIL iteration mechanism's generation count to try to
 * speed this up at least a bit.
 *
 * When we start, we know that the intents are the only things in the
 * AIL. As we process them, however, other items are added to the
 * AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
	struct xfs_trans	*parent_tp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;
	int			error;
#if defined(DEBUG) || defined(XFS_WARN)
	xfs_lsn_t		last_lsn;
#endif

	/*
	 * The intent recovery handlers commit transactions to complete recovery
	 * for individual intents, but any new deferred operations that are
	 * queued during that process are held off until the very end. The
	 * purpose of this transaction is to serve as a container for deferred
	 * operations. Each intent recovery handler must transfer dfops here
	 * before its local transaction commits, and we'll finish the entire
	 * list below.
	 */
	error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
	if (error)
		return error;
	/* error is 0 from here on until an iop_recover callback fails */

	ailp = log->l_ailp;
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
#if defined(DEBUG) || defined(XFS_WARN)
	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		/*
		 * We should never see a redo item with a LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);

		/*
		 * NOTE: If your intent processing routine can create more
		 * deferred ops, you /must/ attach them to the transaction in
		 * this routine or else those subsequent intents will get
		 * replayed in the wrong order!
		 *
		 * The AIL lock is dropped around the callback because recovery
		 * handlers may sleep; items already marked recovered keep the
		 * previous (zero) error and are simply skipped.
		 */
		if (!test_and_set_bit(XFS_LI_RECOVERED, &lip->li_flags)) {
			spin_unlock(&ailp->ail_lock);
			error = lip->li_ops->iop_recover(lip, parent_tp);
			spin_lock(&ailp->ail_lock);
		}
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
	/* Only finish the accumulated dfops if every intent recovered. */
	if (!error)
		error = xlog_finish_defer_ops(parent_tp);
	xfs_trans_cancel(parent_tp);

	return error;
}
2577
/*
 * A cancel occurs when the mount has failed and we're bailing out.
 * Release all pending log intent items so they don't pin the AIL.
 */
STATIC void
xlog_recover_cancel_intents(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		/* drop the AIL lock over the release callback, then resume */
		spin_unlock(&ailp->ail_lock);
		lip->li_ops->iop_release(lip);
		spin_lock(&ailp->ail_lock);
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
2615
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	/* overwrite the bucket head with the list terminator */
	agi = agibp->b_addr;
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	/* best effort only: warn and carry on with the other buckets */
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}
2658
/*
 * Process one inode from an AGI unlinked bucket: grab the inode, read its
 * on-disk copy to find the next entry in the chain, then drop the reference
 * (which triggers the deferred inactivation/free of the unlinked inode).
 *
 * Returns the agino of the next inode in the bucket, or NULLAGINO if the
 * inode could not be read, in which case the whole bucket is abandoned.
 */
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
	if (error)
		goto fail_iput;

	xfs_iflags_clear(ip, XFS_IRECOVERY);
	/* an unlinked-list inode must be unlinked but still allocated */
	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	xfs_irele(ip);
	return agino;

 fail_iput:
	xfs_irele(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
2715
/*
 * Recover AGI unlinked lists
 *
 * This is called during recovery to process any inodes which we unlinked but
 * not freed when the system crashed. These inodes will be on the lists in the
 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
 * any inodes found on the lists. Each inode is removed from the lists when it
 * has been fully truncated and is freed. The freeing of the inode and its
 * removal from the list must be atomic.
 *
 * If everything we touch in the agi processing loop is already in memory, this
 * loop can hold the cpu for a long time. It runs without lock contention,
 * memory allocation contention, the need wait for IO, etc, and so will run
 * until we either run out of inodes to process, run low on memory or we run out
 * of log space.
 *
 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
 * and can prevent other filesytem work (such as CIL pushes) from running. This
 * can lead to deadlocks if the recovery process runs out of log reservation
 * space. Hence we need to yield the CPU when there is other kernel work
 * scheduled on this CPU to ensure other scheduled work can run without undue
 * latency.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;

	mp = log->l_mp;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the ag's we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * buffer reference though, so that it stays pinned in memory
		 * while we need the buffer.
		 */
		agi = agibp->b_addr;
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			/*
			 * Walk the on-disk chain; the helper returns the next
			 * agino, or NULLAGINO at the end of the bucket.
			 */
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
				/* yield the CPU between inodes (see above) */
				cond_resched();
			}
		}
		xfs_buf_rele(agibp);
	}
}
2790
Eric Sandeen91083262019-05-01 20:26:30 -07002791STATIC void
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792xlog_unpack_data(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002793 struct xlog_rec_header *rhead,
Christoph Hellwigb2a922c2015-06-22 09:45:10 +10002794 char *dp,
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002795 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796{
2797 int i, j, k;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002799 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002801 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 dp += BBSIZE;
2803 }
2804
Eric Sandeen62118702008-03-06 13:44:28 +11002805 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
Christoph Hellwigb28708d2008-11-28 14:23:38 +11002806 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002807 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2809 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002810 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 dp += BBSIZE;
2812 }
2813 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814}
2815
Brian Foster9d949012016-01-04 15:55:10 +11002816/*
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002817 * CRC check, unpack and process a log record.
Brian Foster9d949012016-01-04 15:55:10 +11002818 */
2819STATIC int
2820xlog_recover_process(
2821 struct xlog *log,
2822 struct hlist_head rhash[],
2823 struct xlog_rec_header *rhead,
2824 char *dp,
Brian Foster12818d22016-09-26 08:22:16 +10002825 int pass,
2826 struct list_head *buffer_list)
Brian Foster9d949012016-01-04 15:55:10 +11002827{
Dave Chinnercae028d2016-12-05 14:40:32 +11002828 __le32 old_crc = rhead->h_crc;
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002829 __le32 crc;
2830
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002831 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
Brian Foster65282502016-01-04 15:55:10 +11002832
2833 /*
2834 * Nothing else to do if this is a CRC verification pass. Just return
2835 * if this a record with a non-zero crc. Unfortunately, mkfs always
Dave Chinnercae028d2016-12-05 14:40:32 +11002836 * sets old_crc to 0 so we must consider this valid even on v5 supers.
Brian Foster65282502016-01-04 15:55:10 +11002837 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2838 * know precisely what failed.
2839 */
2840 if (pass == XLOG_RECOVER_CRCPASS) {
Dave Chinnercae028d2016-12-05 14:40:32 +11002841 if (old_crc && crc != old_crc)
Brian Foster65282502016-01-04 15:55:10 +11002842 return -EFSBADCRC;
2843 return 0;
2844 }
2845
2846 /*
2847 * We're in the normal recovery path. Issue a warning if and only if the
2848 * CRC in the header is non-zero. This is an advisory warning and the
2849 * zero CRC check prevents warnings from being emitted when upgrading
2850 * the kernel from one that does not add CRCs by default.
2851 */
Dave Chinnercae028d2016-12-05 14:40:32 +11002852 if (crc != old_crc) {
2853 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002854 xfs_alert(log->l_mp,
2855 "log record CRC mismatch: found 0x%x, expected 0x%x.",
Dave Chinnercae028d2016-12-05 14:40:32 +11002856 le32_to_cpu(old_crc),
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002857 le32_to_cpu(crc));
2858 xfs_hex_dump(dp, 32);
2859 }
2860
2861 /*
2862 * If the filesystem is CRC enabled, this mismatch becomes a
2863 * fatal log corruption failure.
2864 */
Darrick J. Wonga5155b82019-11-02 09:40:53 -07002865 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2866 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002867 return -EFSCORRUPTED;
Darrick J. Wonga5155b82019-11-02 09:40:53 -07002868 }
Brian Fosterb94fb2d2016-01-04 15:55:10 +11002869 }
Brian Foster9d949012016-01-04 15:55:10 +11002870
Eric Sandeen91083262019-05-01 20:26:30 -07002871 xlog_unpack_data(rhead, dp, log);
Brian Foster9d949012016-01-04 15:55:10 +11002872
Brian Foster12818d22016-09-26 08:22:16 +10002873 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2874 buffer_list);
Brian Foster9d949012016-01-04 15:55:10 +11002875}
2876
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877STATIC int
2878xlog_valid_rec_header(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05002879 struct xlog *log,
2880 struct xlog_rec_header *rhead,
Gao Xiangf692d092020-09-22 09:41:06 -07002881 xfs_daddr_t blkno,
2882 int bufsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883{
2884 int hlen;
2885
Darrick J. Wonga71895c2019-11-11 12:53:22 -08002886 if (XFS_IS_CORRUPT(log->l_mp,
2887 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
Dave Chinner24513372014-06-25 14:58:08 +10002888 return -EFSCORRUPTED;
Darrick J. Wonga71895c2019-11-11 12:53:22 -08002889 if (XFS_IS_CORRUPT(log->l_mp,
2890 (!rhead->h_version ||
2891 (be32_to_cpu(rhead->h_version) &
2892 (~XLOG_VERSION_OKBITS))))) {
Dave Chinnera0fa2b62011-03-07 10:01:35 +11002893 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
Harvey Harrison34a622b2008-04-10 12:19:21 +10002894 __func__, be32_to_cpu(rhead->h_version));
Darrick J. Wong895e1962019-11-06 09:17:43 -08002895 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 }
2897
Gao Xiangf692d092020-09-22 09:41:06 -07002898 /*
2899 * LR body must have data (or it wouldn't have been written)
2900 * and h_len must not be greater than LR buffer size.
2901 */
Christoph Hellwigb53e6752007-10-12 10:59:34 +10002902 hlen = be32_to_cpu(rhead->h_len);
Gao Xiangf692d092020-09-22 09:41:06 -07002903 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
Dave Chinner24513372014-06-25 14:58:08 +10002904 return -EFSCORRUPTED;
Gao Xiangf692d092020-09-22 09:41:06 -07002905
Darrick J. Wonga71895c2019-11-11 12:53:22 -08002906 if (XFS_IS_CORRUPT(log->l_mp,
2907 blkno > log->l_logBBsize || blkno > INT_MAX))
Dave Chinner24513372014-06-25 14:58:08 +10002908 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 return 0;
2910}
2911
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately. The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 *
 * Returns 0 on success or a negative errno.  On failure, if @first_bad
 * is non-NULL it is set to the start block of the record that was being
 * processed when the error occurred.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no, rblk_no;
	xfs_daddr_t		rhead_blk;	/* start of record being processed */
	char			*offset;
	char			*hbp, *dbp;	/* header and data read buffers */
	int			error = 0, h_size, h_len;
	int			error2 = 0;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	int			i;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD		(buffer_list);

	ASSERT(head_blk != tail_blk);
	blk_no = rhead_blk = tail_blk;

	for (i = 0; i < XLOG_RHASH_SIZE; i++)
		INIT_HLIST_HEAD(&rhash[i]);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size. Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it. Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_alloc_buffer(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;

		/*
		 * xfsprogs has a bug where record length is based on lsunit but
		 * h_size (iclog size) is hardcoded to 32k. Now that we
		 * unconditionally CRC verify the unmount record, this means the
		 * log buffer can be too small for the record and cause an
		 * overrun.
		 *
		 * Detect this condition here. Use lsunit for the buffer size as
		 * long as this looks like the mkfs case. Otherwise, return an
		 * error to avoid a buffer overrun.
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
		    rhead->h_num_logops == cpu_to_be32(1)) {
			xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
				 h_size, log->l_mp->m_logbsize);
			h_size = log->l_mp->m_logbsize;
		}

		error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
		if (error)
			goto bread_err1;

		/*
		 * A v2 record header larger than one cycle-size unit spans
		 * multiple basic blocks; reallocate hbp large enough to
		 * hold all of them.
		 */
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			kmem_free(hbp);
			hbp = xlog_alloc_buffer(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		/* v1 logs: fixed record size, single-sector header. */
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_alloc_buffer(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return -ENOMEM;
	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
	if (!dbp) {
		kmem_free(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_noalign(log, 0,
						wrapped_hblks,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
					split_hblks ? blk_no : 0, h_size);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/*
			 * Read the log record data in multiple reads if it
			 * wraps around the end of the log. Note that if the
			 * header already wrapped, blk_no could point past the
			 * end of the log. The record data is contiguous in
			 * that case.
			 */
			if (blk_no + bblks <= log->l_logBBsize ||
			    blk_no >= log->l_logBBsize) {
				rblk_no = xlog_wrap_logbno(log, blk_no);
				error = xlog_bread(log, rblk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_noalign(log, 0,
						bblks - split_bblks,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass, &buffer_list);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass,
					     &buffer_list);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	kmem_free(dbp);
 bread_err1:
	kmem_free(hbp);

	/*
	 * Submit buffers that have been added from the last record processed,
	 * regardless of error status.
	 */
	if (!list_empty(&buffer_list))
		error2 = xfs_buf_delwri_submit(&buffer_list);

	if (error && first_bad)
		*first_bad = rhead_blk;

	/*
	 * Transactions are freed at commit time but transactions without commit
	 * records on disk are never committed. Free any that may be left in the
	 * hash table.
	 */
	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
		struct hlist_node	*tmp;
		struct xlog_recover	*trans;

		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
			xlog_recover_free_trans(trans);
	}

	return error ? error : error2;
}
3203
3204/*
3205 * Do the recovery of the log. We actually do this in two phases.
3206 * The two passes are necessary in order to implement the function
3207 * of cancelling a record written into the log. The first pass
3208 * determines those things which have been cancelled, and the
3209 * second pass replays log items normally except for those which
3210 * have been cancelled. The handling of the replay and cancellations
3211 * takes place in the log item type specific routines.
3212 *
3213 * The table of items which have cancel records in the log is allocated
3214 * and freed at this level, since only here do we know when all of
3215 * the log recovery has been completed.
3216 */
3217STATIC int
3218xlog_do_log_recovery(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05003219 struct xlog *log,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 xfs_daddr_t head_blk,
3221 xfs_daddr_t tail_blk)
3222{
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003223 int error, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224
3225 ASSERT(head_blk != tail_blk);
3226
3227 /*
3228 * First do a pass to find all of the cancelled buf log items.
3229 * Store them in the buf_cancel_table for use in the second pass.
3230 */
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003231 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3232 sizeof(struct list_head),
Tetsuo Handa707e0dd2019-08-26 12:06:22 -07003233 0);
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003234 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3235 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3236
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
Brian Fosterd7f37692016-01-04 15:55:10 +11003238 XLOG_RECOVER_PASS1, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 if (error != 0) {
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10003240 kmem_free(log->l_buf_cancel_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 log->l_buf_cancel_table = NULL;
3242 return error;
3243 }
3244 /*
3245 * Then do a second pass to actually recover the items in the log.
3246 * When it is complete free the table of buf cancel items.
3247 */
3248 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
Brian Fosterd7f37692016-01-04 15:55:10 +11003249 XLOG_RECOVER_PASS2, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250#ifdef DEBUG
Tim Shimmin6d192a92006-06-09 14:55:38 +10003251 if (!error) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252 int i;
3253
3254 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
Christoph Hellwigd5689ea2010-12-01 22:06:22 +00003255 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 }
3257#endif /* DEBUG */
3258
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10003259 kmem_free(log->l_buf_cancel_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 log->l_buf_cancel_table = NULL;
3261
3262 return error;
3263}
3264
/*
 * Do the actual recovery: replay the log contents, then re-read and
 * re-initialise the in-core superblock state from disk.
 *
 * Returns 0 on success or a negative errno.  On success the log is
 * taken out of active-recovery mode so normal transactions can run.
 */
STATIC int
xlog_do_recover(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_buf		*bp = mp->m_sb_bp;	/* in-core superblock buffer */
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	trace_xfs_log_recover(log, head_blk, tail_blk);

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use. If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn. This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk. If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Now that we've finished replaying all buffer and inode updates,
	 * re-read the superblock and reverify it.  Take the lock and an
	 * extra reference first; xfs_buf_relse() below drops both on each
	 * exit path.
	 */
	xfs_buf_lock(bp);
	xfs_buf_hold(bp);
	error = _xfs_buf_read(bp, XBF_READ);
	if (error) {
		/* A read failure without a shutdown in progress is unexpected. */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __this_address);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	xfs_sb_from_disk(sbp, bp->b_addr);
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	/* Debug-only consistency check of AG vs superblock counters. */
	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
3340
/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Finds the log head and tail, validates the superblock LSN against the
 * log on v5 filesystems, and, if the log is dirty (head != tail), runs
 * the full replay via xlog_do_recover().
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the LSN
	 * could not be verified. Check the superblock LSN against the current
	 * LSN now that it's known.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	/* head == tail means the log is clean and there is nothing to replay. */
	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts. note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true. Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover. We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know the
		 * log is dirty so check if there are any unknown log features
		 * in what we need to recover. If there are unknown features
		 * (e.g. unsupported transactions, then simply reject the
		 * attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumention to coordinate simulation of I/O failures with
		 * log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						: "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		/*
		 * Set even if xlog_do_recover() failed, so that
		 * xlog_recover_finish()/xlog_recover_cancel() know recovery
		 * was attempted and intent processing may be pending.
		 */
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
3425
3426/*
3427 * In the first part of recovery we replay inodes and buffers and build
3428 * up the list of extent free items which need to be processed. Here
3429 * we process the extent free items and clean up the on disk unlinked
3430 * inode lists. This is separated from the first part of recovery so
3431 * that the root and real-time bitmap inodes can be read in from disk in
3432 * between the two stages. This is necessary so that we can free space
3433 * in the real-time portion of the file system.
3434 */
3435int
3436xlog_recover_finish(
Mark Tinguely9a8d2fd2012-06-14 09:22:16 -05003437 struct xlog *log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438{
3439 /*
3440 * Now we're ready to do the transactions needed for the
3441 * rest of recovery. Start with completing all the extent
3442 * free intent records and then process the unlinked inode
3443 * lists. At this point, we essentially run in normal mode
3444 * except that we're still performing recovery actions
3445 * rather than accepting new requests.
3446 */
3447 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
David Chinner3c1e2bb2008-04-10 12:21:11 +10003448 int error;
Darrick J. Wongdc423752016-08-03 11:23:49 +10003449 error = xlog_recover_process_intents(log);
David Chinner3c1e2bb2008-04-10 12:21:11 +10003450 if (error) {
Darrick J. Wongdc423752016-08-03 11:23:49 +10003451 xfs_alert(log->l_mp, "Failed to recover intents");
David Chinner3c1e2bb2008-04-10 12:21:11 +10003452 return error;
3453 }
Darrick J. Wong9e88b5d2016-08-03 12:09:48 +10003454
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455 /*
Darrick J. Wongdc423752016-08-03 11:23:49 +10003456 * Sync the log to get all the intents out of the AIL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 * This isn't absolutely necessary, but it helps in
3458 * case the unlink transactions would have problems
Darrick J. Wongdc423752016-08-03 11:23:49 +10003459 * pushing the intents out of the way.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460 */
Christoph Hellwiga14a3482010-01-19 09:56:46 +00003461 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462
Christoph Hellwig42490232008-08-13 16:49:32 +10003463 xlog_recover_process_iunlinks(log);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464
3465 xlog_recover_check_summary(log);
3466
Dave Chinnera0fa2b62011-03-07 10:01:35 +11003467 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3468 log->l_mp->m_logname ? log->l_mp->m_logname
3469 : "internal");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3471 } else {
Dave Chinnera0fa2b62011-03-07 10:01:35 +11003472 xfs_info(log->l_mp, "Ending clean mount");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 }
3474 return 0;
3475}
3476
Hariprasad Kelama7a92502019-07-03 07:34:18 -07003477void
Brian Fosterf0b2efa2015-08-19 09:58:36 +10003478xlog_recover_cancel(
3479 struct xlog *log)
3480{
Brian Fosterf0b2efa2015-08-19 09:58:36 +10003481 if (log->l_flags & XLOG_RECOVERY_NEEDED)
Hariprasad Kelama7a92502019-07-03 07:34:18 -07003482 xlog_recover_cancel_intents(log);
Brian Fosterf0b2efa2015-08-19 09:58:36 +10003483}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
#if defined(DEBUG)
/*
 * Walk every allocation group and sum the free block and inode counts
 * recorded in the on-disk AGF and AGI headers, as a debug-time
 * consistency check of the per-AG counters against the superblock.
 * Read failures are reported but do not abort the walk.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_buf		*agf_bp;
	struct xfs_buf		*agi_bp;
	xfs_agnumber_t		agno;
	uint64_t		freeblks = 0;
	uint64_t		itotal = 0;
	uint64_t		ifree = 0;
	int			error;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agf_bp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agf	*agf = agf_bp->b_addr;

			/* Free blocks = free extents + free list entries. */
			freeblks += be32_to_cpu(agf->agf_freeblks) +
				    be32_to_cpu(agf->agf_flcount);
			xfs_buf_relse(agf_bp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agi_bp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = agi_bp->b_addr;

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agi_bp);
		}
	}
}
#endif	/* DEBUG */