// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_RECOVER_H__
#define __XFS_LOG_RECOVER_H__

/*
 * Each log item type (XFS_LI_*) gets its own xlog_recover_item_ops to
 * define how recovery should work for that type of log item.
 */
struct xlog_recover_item;
/*
 * Sorting hat for log items as they're read in.  Each recovered item is
 * steered onto one of these lists; see the comment preceding
 * xlog_recover_reorder_trans for the replay-ordering semantics of each list.
 */
enum xlog_recover_reorder {
	XLOG_REORDER_BUFFER_LIST,
	XLOG_REORDER_ITEM_LIST,		/* default: keep recovery order */
	XLOG_REORDER_INODE_BUFFER_LIST,
	XLOG_REORDER_CANCEL_LIST,
};
| 22 | |
/* Per-item-type virtual function table driving log recovery. */
struct xlog_recover_item_ops {
	uint16_t		item_type;	/* XFS_LI_* type code. */

	/*
	 * Help sort recovered log items into the order required to replay them
	 * correctly.  Log item types that always use XLOG_REORDER_ITEM_LIST do
	 * not have to supply a function here.  See the comment preceding
	 * xlog_recover_reorder_trans for more details about what the return
	 * values mean.
	 */
	enum xlog_recover_reorder (*reorder)(struct xlog_recover_item *item);

	/* Start readahead for pass2, if provided. */
	void (*ra_pass2)(struct xlog *log, struct xlog_recover_item *item);

	/* Do whatever work we need to do for pass1, if provided. */
	int (*commit_pass1)(struct xlog *log, struct xlog_recover_item *item);

	/*
	 * This function should do whatever work is needed for pass2 of log
	 * recovery, if provided.
	 *
	 * If the recovered item is an intent item, this function should parse
	 * the recovered item to construct an in-core log intent item and
	 * insert it into the AIL.  The in-core log intent item should have 1
	 * refcount so that the item is freed either (a) when we commit the
	 * recovered log item for the intent-done item; (b) replay the work and
	 * log a new intent-done item; or (c) recovery fails and we have to
	 * abort.
	 *
	 * If the recovered item is an intent-done item, this function should
	 * parse the recovered item to find the id of the corresponding intent
	 * log item.  Next, it should find the in-core log intent item in the
	 * AIL and release it.
	 */
	int (*commit_pass2)(struct xlog *log, struct list_head *buffer_list,
			struct xlog_recover_item *item, xfs_lsn_t lsn);
};
| 61 | |
/*
 * Recovery ops instances, one per log item type.  Each is defined next to
 * the in-core item implementation it recovers (buffer, inode, dquot, and
 * the intent/intent-done pairs: BUI/BUD, EFI/EFD, RUI/RUD, CUI/CUD).
 */
extern const struct xlog_recover_item_ops xlog_icreate_item_ops;
extern const struct xlog_recover_item_ops xlog_buf_item_ops;
extern const struct xlog_recover_item_ops xlog_inode_item_ops;
extern const struct xlog_recover_item_ops xlog_dquot_item_ops;
extern const struct xlog_recover_item_ops xlog_quotaoff_item_ops;
extern const struct xlog_recover_item_ops xlog_bui_item_ops;
extern const struct xlog_recover_item_ops xlog_bud_item_ops;
extern const struct xlog_recover_item_ops xlog_efi_item_ops;
extern const struct xlog_recover_item_ops xlog_efd_item_ops;
extern const struct xlog_recover_item_ops xlog_rui_item_ops;
extern const struct xlog_recover_item_ops xlog_rud_item_ops;
extern const struct xlog_recover_item_ops xlog_cui_item_ops;
extern const struct xlog_recover_item_ops xlog_cud_item_ops;
| 75 | |
/*
 * Macros, structures, prototypes for internal log manager use.
 */

/* Hash of in-progress transactions recovered from the log, keyed by tid. */
#define XLOG_RHASH_BITS  4
#define XLOG_RHASH_SIZE	16	/* == 1 << XLOG_RHASH_BITS */
#define XLOG_RHASH_SHIFT	2
#define XLOG_RHASH(tid)	\
	((((uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1))

/*
 * Upper bound on the number of log regions a single item can carry.
 * NOTE(review): derived from buffer log item chunking (XFS_BLF_CHUNK,
 * defined elsewhere) — confirm against xfs_buf_item.h.
 */
#define XLOG_MAX_REGIONS_IN_ITEM   (XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK / 2 + 1)

/*
 * One log item being recovered.  Item headers are in ri_buf[0].
 * Additional buffers follow.
 */
struct xlog_recover_item {
	struct list_head	ri_list;	/* link in xlog_recover.r_itemq */
	int			ri_cnt;		/* count of regions found */
	int			ri_total;	/* total regions */
	struct xfs_log_iovec	*ri_buf;	/* ptr to regions buffer */
	const struct xlog_recover_item_ops *ri_ops;	/* type-specific recovery ops */
};
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 99 | |
/* One transaction being reassembled from the log during recovery. */
struct xlog_recover {
	struct hlist_node	r_list;		/* link in the XLOG_RHASH table */
	xlog_tid_t		r_log_tid;	/* log's transaction id */
	xfs_trans_header_t	r_theader;	/* trans header for partial */
	int			r_state;	/* not needed */
	xfs_lsn_t		r_lsn;		/* xact lsn */
	struct list_head	r_itemq;	/* q for items */
};
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 108 | |
/* Pull the XFS_LI_* type code out of a recovered item's first region. */
#define ITEM_TYPE(i)	(*(unsigned short *)(i)->ri_buf[0].i_addr)

/*
 * This is the number of entries in the l_buf_cancel_table used during
 * recovery.
 */
#define	XLOG_BC_TABLE_SIZE	64

/*
 * Recovery passes.  NOTE(review): CRCPASS presumably verifies record CRCs
 * before any replay work — confirm against the pass dispatch in
 * xfs_log_recover.c.
 */
#define	XLOG_RECOVER_CRCPASS	0
#define	XLOG_RECOVER_PASS1	1
#define	XLOG_RECOVER_PASS2	2

/* Kick off readahead of a metadata buffer for pass2. */
void xlog_buf_readahead(struct xlog *log, xfs_daddr_t blkno, uint len,
		const struct xfs_buf_ops *ops);
/* Was this buffer range cancelled (listed in the buf cancel table)? */
bool xlog_is_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len);
/* Buffer I/O completion handler used during recovery writes. */
void xlog_recover_iodone(struct xfs_buf *bp);

/* Find the in-core intent item with the given type/id in the AIL and release it. */
void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type,
		uint64_t intent_id);

#endif	/* __XFS_LOG_RECOVER_H__ */