// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/nls.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * LOG FILE structs
 */

// clang-format off

#define MaxLogFileSize     0x100000000ull
#define DefaultLogPageSize 4096
#define MinLogRecordPages  0x30

struct RESTART_HDR {
	struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
	__le32 sys_page_size; // 0x10: Page size of the system which initialized the log.
	__le32 page_size;     // 0x14: Log page size used for this log file.
	__le16 ra_off;        // 0x18:
	__le16 minor_ver;     // 0x1A:
	__le16 major_ver;     // 0x1C:
	__le16 fixups[];
};

#define LFS_NO_CLIENT 0xffff
#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)

struct CLIENT_REC {
	__le64 oldest_lsn;
	__le64 restart_lsn; // 0x08:
	__le16 prev_client; // 0x10:
	__le16 next_client; // 0x12:
	__le16 seq_num;     // 0x14:
	u8 align[6];        // 0x16:
	__le32 name_bytes;  // 0x1C: In bytes.
	__le16 name[32];    // 0x20: Name of client.
};

static_assert(sizeof(struct CLIENT_REC) == 0x60);

/* Two copies of these will exist at the beginning of the log file */
struct RESTART_AREA {
	__le64 current_lsn;       // 0x00: Current logical end of log file.
	__le16 log_clients;       // 0x08: Maximum number of clients.
	__le16 client_idx[2];     // 0x0A: Free/use index into the client record arrays.
	__le16 flags;             // 0x0E: See RESTART_SINGLE_PAGE_IO.
	__le32 seq_num_bits;      // 0x10: The number of bits in sequence number.
	__le16 ra_len;            // 0x14:
	__le16 client_off;        // 0x16:
	__le64 l_size;            // 0x18: Usable log file size.
	__le32 last_lsn_data_len; // 0x20:
	__le16 rec_hdr_len;       // 0x24: Log record header length.
	__le16 data_off;          // 0x26: Log page data offset.
	__le32 open_log_count;    // 0x28:
	__le32 align[5];          // 0x2C:
	struct CLIENT_REC clients[]; // 0x40:
};
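
/*
 * Illustrative layout sketch (assuming 4 KiB pages and a version 1.x
 * log): the two RESTART_HDR/RESTART_AREA copies above occupy file
 * offsets 0 and sys_page_size (0x1000). Log record pages start at
 * 'first_page', which log_init_pg_hdr() below derives as
 * 2 * sys_page_size + 2 * page_size (0x4000 here); version 2.x logs
 * start at 0x22 * page_size instead.
 */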

struct LOG_REC_HDR {
	__le16 redo_op;     // 0x00: NTFS_LOG_OPERATION
	__le16 undo_op;     // 0x02: NTFS_LOG_OPERATION
	__le16 redo_off;    // 0x04: Offset to Redo record.
	__le16 redo_len;    // 0x06: Redo length.
	__le16 undo_off;    // 0x08: Offset to Undo record.
	__le16 undo_len;    // 0x0A: Undo length.
	__le16 target_attr; // 0x0C:
	__le16 lcns_follow; // 0x0E:
	__le16 record_off;  // 0x10:
	__le16 attr_off;    // 0x12:
	__le16 cluster_off; // 0x14:
	__le16 reserved;    // 0x16:
	__le64 target_vcn;  // 0x18:
	__le64 page_lcns[]; // 0x20:
};

static_assert(sizeof(struct LOG_REC_HDR) == 0x20);

#define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)

struct RESTART_TABLE {
	__le16 size;       // 0x00: In bytes
	__le16 used;       // 0x02: Entries
	__le16 total;      // 0x04: Entries
	__le16 res[3];     // 0x06:
	__le32 free_goal;  // 0x0C:
	__le32 first_free; // 0x10:
	__le32 last_free;  // 0x14:
};

static_assert(sizeof(struct RESTART_TABLE) == 0x18);

struct ATTR_NAME_ENTRY {
	__le16 off; // Offset in the Open attribute Table.
	__le16 name_bytes;
	__le16 name[];
};

struct OPEN_ATTR_ENRTY {
	__le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 bytes_per_index; // 0x04:
	enum ATTR_TYPE type;    // 0x08:
	u8 is_dirty_pages;      // 0x0C:
	u8 is_attr_name;        // 0x0D: Faked field to manage 'ptr'
	u8 name_len;            // 0x0E: Faked field to manage 'ptr'
	u8 res;
	struct MFT_REF ref;     // 0x10: File Reference of file containing attribute
	__le64 open_record_lsn; // 0x18:
	void *ptr;              // 0x20:
};

/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
struct OPEN_ATTR_ENRTY_32 {
	__le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 ptr;             // 0x04:
	struct MFT_REF ref;     // 0x08:
	__le64 open_record_lsn; // 0x10:
	u8 is_dirty_pages;      // 0x18:
	u8 is_attr_name;        // 0x19:
	u8 res1[2];
	enum ATTR_TYPE type;    // 0x1C:
	u8 name_len;            // 0x20: In wchar
	u8 res2[3];
	__le32 AttributeName;   // 0x24:
	__le32 bytes_per_index; // 0x28:
};

#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);

/*
 * One entry exists in the Dirty Pages Table for each page which is dirty at
 * the time the Restart Area is written.
 */
struct DIR_PAGE_ENTRY {
	__le32 next;         // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 target_attr;  // 0x04: Index into the Open attribute Table
	__le32 transfer_len; // 0x08:
	__le32 lcns_follow;  // 0x0C:
	__le64 vcn;          // 0x10: Vcn of dirty page
	__le64 oldest_lsn;   // 0x18:
	__le64 page_lcns[];  // 0x20:
};

static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);

/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
struct DIR_PAGE_ENTRY_32 {
	__le32 next;           // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 target_attr;    // 0x04: Index into the Open attribute Table
	__le32 transfer_len;   // 0x08:
	__le32 lcns_follow;    // 0x0C:
	__le32 reserved;       // 0x10:
	__le32 vcn_low;        // 0x14: Vcn of dirty page
	__le32 vcn_hi;         // 0x18: Vcn of dirty page
	__le32 oldest_lsn_low; // 0x1C:
	__le32 oldest_lsn_hi;  // 0x20:
	__le32 page_lcns_low;  // 0x24:
	__le32 page_lcns_hi;   // 0x28:
};

static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);

enum transact_state {
	TransactionUninitialized = 0,
	TransactionActive,
	TransactionPrepared,
	TransactionCommitted
};

struct TRANSACTION_ENTRY {
	__le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	u8 transact_state;    // 0x04:
	u8 reserved[3];       // 0x05:
	__le64 first_lsn;     // 0x08:
	__le64 prev_lsn;      // 0x10:
	__le64 undo_next_lsn; // 0x18:
	__le32 undo_records;  // 0x20: Number of undo log records pending abort
	__le32 undo_len;      // 0x24: Total undo size
};

static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);

struct NTFS_RESTART {
	__le32 major_ver;             // 0x00:
	__le32 minor_ver;             // 0x04:
	__le64 check_point_start;     // 0x08:
	__le64 open_attr_table_lsn;   // 0x10:
	__le64 attr_names_lsn;        // 0x18:
	__le64 dirty_pages_table_lsn; // 0x20:
	__le64 transact_table_lsn;    // 0x28:
	__le32 open_attr_len;         // 0x30: In bytes
	__le32 attr_names_len;        // 0x34: In bytes
	__le32 dirty_pages_len;       // 0x38: In bytes
	__le32 transact_table_len;    // 0x3C: In bytes
};

static_assert(sizeof(struct NTFS_RESTART) == 0x40);

struct NEW_ATTRIBUTE_SIZES {
	__le64 alloc_size;
	__le64 valid_size;
	__le64 data_size;
	__le64 total_size;
};

struct BITMAP_RANGE {
	__le32 bitmap_off;
	__le32 bits;
};

struct LCN_RANGE {
	__le64 lcn;
	__le64 len;
};

/* The following type defines the different log record types. */
#define LfsClientRecord  cpu_to_le32(1)
#define LfsClientRestart cpu_to_le32(2)

/* This is used to uniquely identify a client for a particular log file. */
struct CLIENT_ID {
	__le16 seq_num;
	__le16 client_idx;
};

/* This is the header that begins every Log Record in the log file. */
struct LFS_RECORD_HDR {
	__le64 this_lsn;             // 0x00:
	__le64 client_prev_lsn;      // 0x08:
	__le64 client_undo_next_lsn; // 0x10:
	__le32 client_data_len;      // 0x18:
	struct CLIENT_ID client;     // 0x1C: Owner of this log record.
	__le32 record_type;          // 0x20: LfsClientRecord or LfsClientRestart.
	__le32 transact_id;          // 0x24:
	__le16 flags;                // 0x28: LOG_RECORD_MULTI_PAGE
	u8 align[6];                 // 0x2A:
};

#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)

static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);

struct LFS_RECORD {
	__le16 next_record_off; // 0x00: Offset of the free space in the page.
	u8 align[6];            // 0x02:
	__le64 last_end_lsn;    // 0x08: Lsn for the last log record which ends on the page.
};

static_assert(sizeof(struct LFS_RECORD) == 0x10);

struct RECORD_PAGE_HDR {
	struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
	__le32 rflags;                  // 0x10: See LOG_PAGE_LOG_RECORD_END
	__le16 page_count;              // 0x14:
	__le16 page_pos;                // 0x16:
	struct LFS_RECORD record_hdr;   // 0x18:
	__le16 fixups[10];              // 0x28:
	__le32 file_off;                // 0x3c: Used when major version >= 2
};

// clang-format on

// Page contains the end of a log record.
#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)

static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
{
	return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
}

static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);

/*
 * END of NTFS LOG structures
 */

/* Define some tuning parameters to keep the restart tables a reasonable size. */
#define INITIAL_NUMBER_TRANSACTIONS 5

enum NTFS_LOG_OPERATION {

	Noop = 0x00,
	CompensationLogRecord = 0x01,
	InitializeFileRecordSegment = 0x02,
	DeallocateFileRecordSegment = 0x03,
	WriteEndOfFileRecordSegment = 0x04,
	CreateAttribute = 0x05,
	DeleteAttribute = 0x06,
	UpdateResidentValue = 0x07,
	UpdateNonresidentValue = 0x08,
	UpdateMappingPairs = 0x09,
	DeleteDirtyClusters = 0x0A,
	SetNewAttributeSizes = 0x0B,
	AddIndexEntryRoot = 0x0C,
	DeleteIndexEntryRoot = 0x0D,
	AddIndexEntryAllocation = 0x0E,
	DeleteIndexEntryAllocation = 0x0F,
	WriteEndOfIndexBuffer = 0x10,
	SetIndexEntryVcnRoot = 0x11,
	SetIndexEntryVcnAllocation = 0x12,
	UpdateFileNameRoot = 0x13,
	UpdateFileNameAllocation = 0x14,
	SetBitsInNonresidentBitMap = 0x15,
	ClearBitsInNonresidentBitMap = 0x16,
	HotFix = 0x17,
	EndTopLevelAction = 0x18,
	PrepareTransaction = 0x19,
	CommitTransaction = 0x1A,
	ForgetTransaction = 0x1B,
	OpenNonresidentAttribute = 0x1C,
	OpenAttributeTableDump = 0x1D,
	AttributeNamesDump = 0x1E,
	DirtyPageTableDump = 0x1F,
	TransactionTableDump = 0x20,
	UpdateRecordDataRoot = 0x21,
	UpdateRecordDataAllocation = 0x22,

	UpdateRelativeDataInIndex = 0x23, // NtOfsRestartUpdateRelativeDataInIndex
	UpdateRelativeDataInIndex2 = 0x24,
	ZeroEndOfFileRecord = 0x25,
};

/*
 * Array for log records which require a target attribute.
 * A true indicates that the corresponding restart operation
 * requires a target attribute.
 */
static const u8 AttributeRequired[] = {
	0xFC, 0xFB, 0xFF, 0x10, 0x06,
};

static inline bool is_target_required(u16 op)
{
	bool ret = op <= UpdateRecordDataAllocation &&
		   (AttributeRequired[op >> 3] >> (op & 7) & 1);
	return ret;
}
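
/*
 * A worked example of the bitmap lookup above: op =
 * InitializeFileRecordSegment (0x02) selects AttributeRequired[0] ==
 * 0xFC, and bit 2 of 0xFC is set, so a target attribute is required;
 * op = Noop (0x00) tests bit 0 of 0xFC, which is clear, so none is
 * required.
 */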

static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
{
	switch (op) {
	case Noop:
	case DeleteDirtyClusters:
	case HotFix:
	case EndTopLevelAction:
	case PrepareTransaction:
	case CommitTransaction:
	case ForgetTransaction:
	case CompensationLogRecord:
	case OpenNonresidentAttribute:
	case OpenAttributeTableDump:
	case AttributeNamesDump:
	case DirtyPageTableDump:
	case TransactionTableDump:
		return true;
	default:
		return false;
	}
}

enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };

/* Bytes per restart table. */
static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
{
	return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
	       sizeof(struct RESTART_TABLE);
}

/* Log record length. */
static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
{
	u16 t16 = le16_to_cpu(lr->lcns_follow);

	return struct_size(lr, page_lcns, max_t(u16, 1, t16));
}
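
/*
 * E.g. a record with lcns_follow == 2 occupies struct_size() =
 * 0x20 + 2 * sizeof(__le64) = 0x30 bytes; lcns_follow == 0 still
 * reserves one page_lcns slot (0x28 bytes) because of the
 * max_t(u16, 1, t16) above.
 */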

struct lcb {
	struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn.
	struct LOG_REC_HDR *log_rec;
	u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
	struct CLIENT_ID client;
	bool alloc; // If true, then we should deallocate 'log_rec'.
};

static void lcb_put(struct lcb *lcb)
{
	if (lcb->alloc)
		kfree(lcb->log_rec);
	kfree(lcb->lrh);
	kfree(lcb);
}

/* Find the oldest lsn from active clients. */
static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
				     __le16 next_client, u64 *oldest_lsn)
{
	while (next_client != LFS_NO_CLIENT_LE) {
		const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
		u64 lsn = le64_to_cpu(cr->oldest_lsn);

		/* Ignore this block if its oldest lsn is 0. */
		if (lsn && lsn < *oldest_lsn)
			*oldest_lsn = lsn;

		next_client = cr->next_client;
	}
}

static inline bool is_rst_page_hdr_valid(u32 file_off,
					 const struct RESTART_HDR *rhdr)
{
	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
	u32 page_size = le32_to_cpu(rhdr->page_size);
	u32 end_usa;
	u16 ro;

	if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
	    sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
		return false;
	}

	/* Check that if the file offset isn't 0, it is the system page size. */
	if (file_off && file_off != sys_page)
		return false;

	/* Check support version 1.1+. */
	if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
		return false;

	if (le16_to_cpu(rhdr->major_ver) > 2)
		return false;

	ro = le16_to_cpu(rhdr->ra_off);
	if (!IS_ALIGNED(ro, 8) || ro > sys_page)
		return false;

	end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
	end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
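
	/*
	 * Illustrative numbers: with a 4096-byte system page the update
	 * sequence array holds (4096 >> 9) + 1 = 9 16-bit entries, i.e.
	 * 18 bytes starting at fix_off, so the restart area offset must
	 * lie beyond fix_off + 18.
	 */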

	if (ro < end_usa)
		return false;

	return true;
}

static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
{
	const struct RESTART_AREA *ra;
	u16 cl, fl, ul;
	u32 off, l_size, file_dat_bits, file_size_round;
	u16 ro = le16_to_cpu(rhdr->ra_off);
	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);

	if (ro + offsetof(struct RESTART_AREA, l_size) >
	    SECTOR_SIZE - sizeof(short))
		return false;

	ra = Add2Ptr(rhdr, ro);
	cl = le16_to_cpu(ra->log_clients);

	if (cl > 1)
		return false;

	off = le16_to_cpu(ra->client_off);

	if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))
		return false;

	off += cl * sizeof(struct CLIENT_REC);

	if (off > sys_page)
		return false;

	/*
	 * Check the restart length field and whether the entire
	 * restart area is contained within that length.
	 */
	if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
	    off > le16_to_cpu(ra->ra_len)) {
		return false;
	}

	/*
	 * As a final check make sure that the use list and the free list
	 * are either empty or point to a valid client.
	 */
	fl = le16_to_cpu(ra->client_idx[0]);
	ul = le16_to_cpu(ra->client_idx[1]);
	if ((fl != LFS_NO_CLIENT && fl >= cl) ||
	    (ul != LFS_NO_CLIENT && ul >= cl))
		return false;

	/* Make sure the sequence number bits match the log file size. */
	l_size = le64_to_cpu(ra->l_size);

	file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
	file_size_round = 1u << (file_dat_bits + 3);
	if (file_size_round != l_size &&
	    (file_size_round < l_size || (file_size_round / 2) > l_size)) {
		return false;
	}
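
	/*
	 * Worked example of the check above (illustrative): a 4 MiB log
	 * with seq_num_bits == 45 gives file_dat_bits == 64 - 45 == 19,
	 * so file_size_round == 1 << (19 + 3) == 4 MiB, matching l_size
	 * exactly.
	 */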

	/* The log page data offset and record header length must be quad-aligned. */
	if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
	    !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
		return false;

	return true;
}

static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
					bool usa_error)
{
	u16 ro = le16_to_cpu(rhdr->ra_off);
	const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
	u16 ra_len = le16_to_cpu(ra->ra_len);
	const struct CLIENT_REC *ca;
	u32 i;

	if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
		return false;

	/* Find the start of the client array. */
	ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));

	/*
	 * Start with the free list.
	 * Check that all the clients are valid and that there isn't a cycle.
	 * Do the in-use list on the second pass.
	 */
	for (i = 0; i < 2; i++) {
		u16 client_idx = le16_to_cpu(ra->client_idx[i]);
		bool first_client = true;
		u16 clients = le16_to_cpu(ra->log_clients);

		while (client_idx != LFS_NO_CLIENT) {
			const struct CLIENT_REC *cr;

			if (!clients ||
			    client_idx >= le16_to_cpu(ra->log_clients))
				return false;

			clients -= 1;
			cr = ca + client_idx;

			client_idx = le16_to_cpu(cr->next_client);

			if (first_client) {
				first_client = false;
				if (cr->prev_client != LFS_NO_CLIENT_LE)
					return false;
			}
		}
	}

	return true;
}

/*
 * remove_client
 *
 * Remove a client record from a client record list in a restart area.
 */
static inline void remove_client(struct CLIENT_REC *ca,
				 const struct CLIENT_REC *cr, __le16 *head)
{
	if (cr->prev_client == LFS_NO_CLIENT_LE)
		*head = cr->next_client;
	else
		ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;

	if (cr->next_client != LFS_NO_CLIENT_LE)
		ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
}

/*
 * add_client - Add a client record to the start of a list.
 */
static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
{
	struct CLIENT_REC *cr = ca + index;

	cr->prev_client = LFS_NO_CLIENT_LE;
	cr->next_client = *head;

	if (*head != LFS_NO_CLIENT_LE)
		ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);

	*head = cpu_to_le16(index);
}
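
/*
 * Note: these lists are doubly linked through 16-bit indices into the
 * client array rather than pointers, with LFS_NO_CLIENT as the list
 * terminator. E.g. adding client 0 to an empty list sets *head to 0
 * and leaves both of that client's links at LFS_NO_CLIENT_LE.
 */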

static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
{
	__le32 *e;
	u32 bprt;
	u16 rsize = t ? le16_to_cpu(t->size) : 0;

	if (!c) {
		if (!t || !t->total)
			return NULL;
		e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
	} else {
		e = Add2Ptr(c, rsize);
	}

	/* Loop until we hit the first one allocated, or the end of the list. */
	for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
	     e = Add2Ptr(e, rsize)) {
		if (*e == RESTART_ENTRY_ALLOCATED_LE)
			return e;
	}
	return NULL;
}

/*
 * find_dp - Search for a @vcn in Dirty Page Table.
 */
static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
					     u32 target_attr, u64 vcn)
{
	__le32 ta = cpu_to_le32(target_attr);
	struct DIR_PAGE_ENTRY *dp = NULL;

	while ((dp = enum_rstbl(dptbl, dp))) {
		u64 dp_vcn = le64_to_cpu(dp->vcn);

		if (dp->target_attr == ta && vcn >= dp_vcn &&
		    vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
			return dp;
		}
	}
	return NULL;
}

static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
{
	if (use_default)
		page_size = DefaultLogPageSize;

	/* Round the file size down to a system page boundary. */
	*l_size &= ~(page_size - 1);

	/* File should contain at least 2 restart pages and MinLogRecordPages pages. */
	if (*l_size < (MinLogRecordPages + 2) * page_size)
		return 0;

	return page_size;
}

static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
			  u32 bytes_per_attr_entry)
{
	u16 t16;

	if (bytes < sizeof(struct LOG_REC_HDR))
		return false;
	if (!tr)
		return false;

	if ((tr - sizeof(struct RESTART_TABLE)) %
	    sizeof(struct TRANSACTION_ENTRY))
		return false;

	if (le16_to_cpu(lr->redo_off) & 7)
		return false;

	if (le16_to_cpu(lr->undo_off) & 7)
		return false;

	if (lr->target_attr)
		goto check_lcns;

	if (is_target_required(le16_to_cpu(lr->redo_op)))
		return false;

	if (is_target_required(le16_to_cpu(lr->undo_op)))
		return false;

check_lcns:
	if (!lr->lcns_follow)
		goto check_length;

	t16 = le16_to_cpu(lr->target_attr);
	if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
		return false;

check_length:
	if (bytes < lrh_length(lr))
		return false;

	return true;
}

static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
{
	u32 ts;
	u32 i, off;
	u16 rsize = le16_to_cpu(rt->size);
	u16 ne = le16_to_cpu(rt->used);
	u32 ff = le32_to_cpu(rt->first_free);
	u32 lf = le32_to_cpu(rt->last_free);

	ts = rsize * ne + sizeof(struct RESTART_TABLE);

	if (!rsize || rsize > bytes ||
	    rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
	    le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
	    (ff && ff < sizeof(struct RESTART_TABLE)) ||
	    (lf && lf < sizeof(struct RESTART_TABLE))) {
		return false;
	}

	/*
	 * Verify each entry is either allocated or points
	 * to a valid offset in the table.
	 */
	for (i = 0; i < ne; i++) {
		off = le32_to_cpu(*(__le32 *)Add2Ptr(
			rt, i * rsize + sizeof(struct RESTART_TABLE)));

		if (off != RESTART_ENTRY_ALLOCATED && off &&
		    (off < sizeof(struct RESTART_TABLE) ||
		     ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
			return false;
		}
	}

	/*
	 * Walk through the list headed by the first entry to make
	 * sure none of the entries are currently being used.
	 */
	for (off = ff; off;) {
		if (off == RESTART_ENTRY_ALLOCATED)
			return false;

		off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
	}

	return true;
}

/*
 * free_rsttbl_idx - Free a previously allocated index in a Restart Table.
 */
static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
{
	__le32 *e;
	u32 lf = le32_to_cpu(rt->last_free);
	__le32 off_le = cpu_to_le32(off);

	e = Add2Ptr(rt, off);

	if (off < le32_to_cpu(rt->free_goal)) {
		*e = rt->first_free;
		rt->first_free = off_le;
		if (!lf)
			rt->last_free = off_le;
	} else {
		if (lf)
			*(__le32 *)Add2Ptr(rt, lf) = off_le;
		else
			rt->first_free = off_le;

		rt->last_free = off_le;
		*e = 0;
	}

	le16_sub_cpu(&rt->total, 1);
}
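
/*
 * Note the role of free_goal above: entries freed below it are pushed
 * onto the front of the free list and are reused first, while entries
 * at or beyond it are appended to the tail, which biases future
 * allocations toward the start of the table.
 */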

static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
{
	__le32 *e, *last_free;
	u32 off;
	u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
	u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
	struct RESTART_TABLE *t = kzalloc(bytes, GFP_NOFS);

	if (!t)
		return NULL;

	t->size = cpu_to_le16(esize);
	t->used = cpu_to_le16(used);
	t->free_goal = cpu_to_le32(~0u);
	t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
	t->last_free = cpu_to_le32(lf);

	e = (__le32 *)(t + 1);
	last_free = Add2Ptr(t, lf);

	for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
	     e = Add2Ptr(e, esize), off += esize) {
		*e = cpu_to_le32(off);
	}
	return t;
}
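
/*
 * For example (illustrative numbers), init_rsttbl(0x28, 5) builds a
 * 0xE0-byte table whose free list is chained by offsets:
 * first_free == 0x18 -> 0x40 -> 0x68 -> 0x90 -> 0xB8 (last_free); the
 * final entry stays zeroed by kzalloc() and terminates the list.
 */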

static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
						  u32 add, u32 free_goal)
{
	u16 esize = le16_to_cpu(tbl->size);
	__le32 osize = cpu_to_le32(bytes_per_rt(tbl));
	u32 used = le16_to_cpu(tbl->used);
	struct RESTART_TABLE *rt;

	rt = init_rsttbl(esize, used + add);
	if (!rt)
		return NULL;

	memcpy(rt + 1, tbl + 1, esize * used);

	rt->free_goal = free_goal == ~0u
				? cpu_to_le32(~0u)
				: cpu_to_le32(sizeof(struct RESTART_TABLE) +
					      free_goal * esize);

	if (tbl->first_free) {
		rt->first_free = tbl->first_free;
		*(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
	} else {
		rt->first_free = osize;
	}

	rt->total = tbl->total;

	kfree(tbl);
	return rt;
}

/*
 * alloc_rsttbl_idx
 *
 * Allocate an index from within a previously initialized Restart Table.
 */
static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
{
	u32 off;
	__le32 *e;
	struct RESTART_TABLE *t = *tbl;

	if (!t->first_free) {
		*tbl = t = extend_rsttbl(t, 16, ~0u);
		if (!t)
			return NULL;
	}

	off = le32_to_cpu(t->first_free);

	/* Dequeue this entry and zero it. */
	e = Add2Ptr(t, off);

	t->first_free = *e;

	memset(e, 0, le16_to_cpu(t->size));

	*e = RESTART_ENTRY_ALLOCATED_LE;

	/* If list is going empty, then we fix the last_free as well. */
	if (!t->first_free)
		t->last_free = 0;

	le16_add_cpu(&t->total, 1);

	return Add2Ptr(t, off);
}

/*
 * alloc_rsttbl_from_idx
 *
 * Allocate a specific index from within a previously initialized Restart Table.
 */
static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
{
	u32 off;
	__le32 *e;
	struct RESTART_TABLE *rt = *tbl;
	u32 bytes = bytes_per_rt(rt);
	u16 esize = le16_to_cpu(rt->size);

	/* If the entry is not in the table, we will have to extend the table. */
	if (vbo >= bytes) {
		/*
		 * Extend the size by computing the number of entries between
		 * the existing size and the desired index and adding 1 to that.
		 */
		u32 bytes2idx = vbo - bytes;

		/*
		 * There should always be an integral number of entries
		 * being added. Now extend the table.
		 */
		*tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
		if (!rt)
			return NULL;
	}

	/* See if the entry is already allocated, and just return if it is. */
	e = Add2Ptr(rt, vbo);

	if (*e == RESTART_ENTRY_ALLOCATED_LE)
		return e;

	/*
	 * Walk through the table, looking for the entry we're
	 * interested in and the previous entry.
	 */
	off = le32_to_cpu(rt->first_free);
	e = Add2Ptr(rt, off);

	if (off == vbo) {
		/* This is a match. */
		rt->first_free = *e;
		goto skip_looking;
	}

	/*
	 * Need to walk through the list looking for the predecessor
	 * of our entry.
	 */
	for (;;) {
		/* Remember the entry just found. */
		u32 last_off = off;
		__le32 *last_e = e;

		/* Should never run out of entries. */

		/* Look up the next entry in the list. */
		off = le32_to_cpu(*last_e);
		e = Add2Ptr(rt, off);

		/* If this is our match we are done. */
		if (off == vbo) {
			*last_e = *e;

			/*
			 * If this was the last entry, we update that
			 * table as well.
			 */
			if (le32_to_cpu(rt->last_free) == off)
				rt->last_free = cpu_to_le32(last_off);
			break;
		}
	}

skip_looking:
	/* If the list is now empty, we fix the last_free as well. */
	if (!rt->first_free)
		rt->last_free = 0;

	/* Zero this entry. */
	memset(e, 0, esize);
	*e = RESTART_ENTRY_ALLOCATED_LE;

	le16_add_cpu(&rt->total, 1);

	return e;
}

#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)

#define NTFSLOG_WRAPPED 0x00000001
#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
#define NTFSLOG_NO_LAST_LSN 0x00000004
#define NTFSLOG_REUSE_TAIL 0x00000010
#define NTFSLOG_NO_OLDEST_LSN 0x00000020

/* Helper struct to work with NTFS $LogFile. */
struct ntfs_log {
	struct ntfs_inode *ni;

	u32 l_size;
	u32 sys_page_size;
	u32 sys_page_mask;
	u32 page_size;
	u32 page_mask; // page_size - 1
	u8 page_bits;
	struct RECORD_PAGE_HDR *one_page_buf;

	struct RESTART_TABLE *open_attr_tbl;
	u32 transaction_id;
	u32 clst_per_page;

	u32 first_page;
	u32 next_page;
	u32 ra_off;
	u32 data_off;
	u32 restart_size;
	u32 data_size;
	u16 record_header_len;
	u64 seq_num;
	u32 seq_num_bits;
	u32 file_data_bits;
	u32 seq_num_mask; /* (8 << file_data_bits) - 1, see log_create() */

	struct RESTART_AREA *ra; /* In-memory image of the next restart area. */
	u32 ra_size; /* The usable size of the restart area. */

	/*
	 * If true, then the in-memory restart area is to be written
	 * to the first position on the disk.
	 */
	bool init_ra;
	bool set_dirty; /* True if we need to set dirty flag. */

	u64 oldest_lsn;

	u32 oldest_lsn_off;
	u64 last_lsn;

	u32 total_avail;
	u32 total_avail_pages;
	u32 total_undo_commit;
	u32 max_current_avail;
	u32 current_avail;
	u32 reserved;

	short major_ver;
	short minor_ver;

	u32 l_flags; /* See NTFSLOG_XXX */
	u32 current_openlog_count; /* On-disk value for open_log_count. */

	struct CLIENT_ID client_id;
	u32 client_undo_commit;
};

static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
{
	u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);

	return vbo;
}
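
/*
 * The shift pair above is equivalent to masking lsn down to its low
 * file_data_bits bits and multiplying by 8, turning an lsn's 8-byte
 * index into a byte offset. E.g. with seq_num_bits == 45, an lsn
 * whose low 19 bits are 0x201 maps to vbo 0x1008.
 */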

/* Compute the offset in the log file of the next log page. */
static inline u32 next_page_off(struct ntfs_log *log, u32 off)
{
	off = (off & ~log->sys_page_mask) + log->page_size;
	return off >= log->l_size ? log->first_page : off;
}

static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
{
	return (((u32)lsn) << 3) & log->page_mask;
}

static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
{
	return (off >> 3) + (Seq << log->file_data_bits);
}

static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
{
	return lsn >= log->oldest_lsn &&
	       lsn <= le64_to_cpu(log->ra->current_lsn);
}

static inline u32 hdr_file_off(struct ntfs_log *log,
			       struct RECORD_PAGE_HDR *hdr)
{
	if (log->major_ver < 2)
		return le64_to_cpu(hdr->rhdr.lsn);

	return le32_to_cpu(hdr->file_off);
}

static inline u64 base_lsn(struct ntfs_log *log,
			   const struct RECORD_PAGE_HDR *hdr, u64 lsn)
{
	u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
	u64 ret = (((h_lsn >> log->file_data_bits) +
		    (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
		   << log->file_data_bits) +
		  ((((is_log_record_end(hdr) &&
		      h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
			     ? le16_to_cpu(hdr->record_hdr.next_record_off)
			     : log->page_size) +
		    lsn) >>
		   3);

	return ret;
}

static inline bool verify_client_lsn(struct ntfs_log *log,
				     const struct CLIENT_REC *client, u64 lsn)
{
	return lsn >= le64_to_cpu(client->oldest_lsn) &&
	       lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
}

struct restart_info {
	u64 last_lsn;
	struct RESTART_HDR *r_page;
	u32 vbo;
	bool chkdsk_was_run;
	bool valid_page;
	bool initialized;
	bool restart;
};

static int read_log_page(struct ntfs_log *log, u32 vbo,
			 struct RECORD_PAGE_HDR **buffer, bool *usa_error)
{
	int err = 0;
	u32 page_idx = vbo >> log->page_bits;
	u32 page_off = vbo & log->page_mask;
	u32 bytes = log->page_size - page_off;
	void *to_free = NULL;
	u32 page_vbo = page_idx << log->page_bits;
	struct RECORD_PAGE_HDR *page_buf;
	struct ntfs_inode *ni = log->ni;
	bool bBAAD;

	if (vbo >= log->l_size)
		return -EINVAL;

	if (!*buffer) {
		to_free = kmalloc(bytes, GFP_NOFS);
		if (!to_free)
			return -ENOMEM;
		*buffer = to_free;
	}

	page_buf = page_off ? log->one_page_buf : *buffer;

	err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
			       log->page_size, NULL);
	if (err)
		goto out;

	if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
		ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);

	if (page_buf != *buffer)
		memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);

	bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;

	/* Check that the update sequence array for this page is valid. */
	if (usa_error)
		*usa_error = bBAAD;
	else if (bBAAD)
		/* If we don't allow errors, raise an error status. */
		err = -EINVAL;

out:
	if (err && to_free) {
		kfree(to_free);
		*buffer = NULL;
	}

	return err;
}

/*
 * log_read_rst
 *
 * It walks through 512-byte blocks of the file looking for a valid
 * restart page header. It will stop the first time we find a
 * valid page header.
 */
static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
			struct restart_info *info)
{
	u32 skip, vbo;
	struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS);

	if (!r_page)
		return -ENOMEM;

	memset(info, 0, sizeof(struct restart_info));

	/* Determine which restart area we are looking for. */
	if (first) {
		vbo = 0;
		skip = 512;
	} else {
		vbo = 512;
		skip = 0;
	}

	/* Loop continuously until we succeed. */
	for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
		bool usa_error;
		u32 sys_page_size;
		bool brst, bchk;
		struct RESTART_AREA *ra;

		/* Read a page header at the current offset. */
		if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
				  &usa_error)) {
			/* Ignore any errors. */
			continue;
		}

		/* Exit if the signature is a log record page. */
		if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
			info->initialized = true;
			break;
		}

		brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
		bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;

		if (!bchk && !brst) {
			if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
				/*
				 * Remember if the signature does not
				 * indicate uninitialized file.
				 */
				info->initialized = true;
			}
			continue;
		}

		ra = NULL;
		info->valid_page = false;
		info->initialized = true;
		info->vbo = vbo;

		/* Let's check the restart area if this is a valid page. */
		if (!is_rst_page_hdr_valid(vbo, r_page))
			goto check_result;
		ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));

		if (!is_rst_area_valid(r_page))
			goto check_result;

		/*
		 * We have a valid restart page header and restart area.
		 * If chkdsk was run or we have no clients then we have
		 * no more checking to do.
		 */
		if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
			info->valid_page = true;
			goto check_result;
		}

		/* Read the entire restart area. */
		sys_page_size = le32_to_cpu(r_page->sys_page_size);
		if (DefaultLogPageSize != sys_page_size) {
			kfree(r_page);
			r_page = kzalloc(sys_page_size, GFP_NOFS);
			if (!r_page)
				return -ENOMEM;

			if (read_log_page(log, vbo,
					  (struct RECORD_PAGE_HDR **)&r_page,
					  &usa_error)) {
				/* Ignore any errors. */
				kfree(r_page);
				r_page = NULL;
				continue;
			}
		}

		if (is_client_area_valid(r_page, usa_error)) {
			info->valid_page = true;
			ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
		}

check_result:
		/*
		 * If chkdsk was run then update the caller's
		 * values and return.
		 */
		if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
			info->chkdsk_was_run = true;
			info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
			info->restart = true;
			info->r_page = r_page;
			return 0;
		}

		/*
		 * If we have a valid page then copy the values
		 * we need from it.
		 */
		if (info->valid_page) {
			info->last_lsn = le64_to_cpu(ra->current_lsn);
			info->restart = true;
			info->r_page = r_page;
			return 0;
		}
	}

	kfree(r_page);

	return 0;
}
| 1312 | |
| 1313 | /* |
 * log_init_pg_hdr - Init @log from the restart page header.
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1315 | */ |
| 1316 | static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size, |
| 1317 | u32 page_size, u16 major_ver, u16 minor_ver) |
| 1318 | { |
| 1319 | log->sys_page_size = sys_page_size; |
| 1320 | log->sys_page_mask = sys_page_size - 1; |
| 1321 | log->page_size = page_size; |
| 1322 | log->page_mask = page_size - 1; |
| 1323 | log->page_bits = blksize_bits(page_size); |
| 1324 | |
| 1325 | log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits; |
| 1326 | if (!log->clst_per_page) |
| 1327 | log->clst_per_page = 1; |
| 1328 | |
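	/*
	 * The first usable log page follows the two restart pages and,
	 * for v1.x logs, the two tail-copy pages; v2.0 logs appear to
	 * reserve 0x20 tail pages instead, so data starts at page 0x22.
	 */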
| 1329 | log->first_page = major_ver >= 2 |
| 1330 | ? 0x22 * page_size |
| 1331 | : ((sys_page_size << 1) + (page_size << 1)); |
| 1332 | log->major_ver = major_ver; |
| 1333 | log->minor_ver = minor_ver; |
| 1334 | } |
| 1335 | |
| 1336 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1337 | * log_create - Init @log in cases when we don't have a restart area to use. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1338 | */ |
| 1339 | static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn, |
| 1340 | u32 open_log_count, bool wrapped, bool use_multi_page) |
| 1341 | { |
| 1342 | log->l_size = l_size; |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1343 | /* All file offsets must be quadword aligned. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1344 | log->file_data_bits = blksize_bits(l_size) - 3; |
| 1345 | log->seq_num_mask = (8 << log->file_data_bits) - 1; |
| 1346 | log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits; |
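	/*
	 * Start two sequence numbers past the last lsn so every new lsn
	 * is strictly greater than anything already in the file (the
	 * extra increment appears to act as a guard gap).
	 */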
| 1347 | log->seq_num = (last_lsn >> log->file_data_bits) + 2; |
| 1348 | log->next_page = log->first_page; |
| 1349 | log->oldest_lsn = log->seq_num << log->file_data_bits; |
| 1350 | log->oldest_lsn_off = 0; |
| 1351 | log->last_lsn = log->oldest_lsn; |
| 1352 | |
| 1353 | log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN; |
| 1354 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1355 | /* Set the correct flags for the I/O and indicate if we have wrapped. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1356 | if (wrapped) |
| 1357 | log->l_flags |= NTFSLOG_WRAPPED; |
| 1358 | |
| 1359 | if (use_multi_page) |
| 1360 | log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO; |
| 1361 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1362 | /* Compute the log page values. */ |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 1363 | log->data_off = ALIGN( |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1364 | offsetof(struct RECORD_PAGE_HDR, fixups) + |
Konstantin Komarov | d362446 | 2021-08-31 16:57:40 +0300 | [diff] [blame] | 1365 | sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1), |
| 1366 | 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1367 | log->data_size = log->page_size - log->data_off; |
| 1368 | log->record_header_len = sizeof(struct LFS_RECORD_HDR); |
| 1369 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1370 | /* Remember the different page sizes for reservation. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1371 | log->reserved = log->data_size - log->record_header_len; |
| 1372 | |
| 1373 | /* Compute the restart page values. */ |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 1374 | log->ra_off = ALIGN( |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1375 | offsetof(struct RESTART_HDR, fixups) + |
Konstantin Komarov | d362446 | 2021-08-31 16:57:40 +0300 | [diff] [blame] | 1376 | sizeof(short) * |
| 1377 | ((log->sys_page_size >> SECTOR_SHIFT) + 1), |
| 1378 | 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1379 | log->restart_size = log->sys_page_size - log->ra_off; |
| 1380 | log->ra_size = struct_size(log->ra, clients, 1); |
| 1381 | log->current_openlog_count = open_log_count; |
| 1382 | |
| 1383 | /* |
| 1384 | * The total available log file space is the number of |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1385 | * log file pages times the space available on each page. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1386 | */ |
| 1387 | log->total_avail_pages = log->l_size - log->first_page; |
| 1388 | log->total_avail = log->total_avail_pages >> log->page_bits; |
| 1389 | |
	/*
	 * We assume that we can't use the end of the page if it holds
	 * less than the file record size.
	 * Then we won't need to reserve more than the caller asks for.
	 */
| 1395 | log->max_current_avail = log->total_avail * log->reserved; |
| 1396 | log->total_avail = log->total_avail * log->data_size; |
| 1397 | log->current_avail = log->max_current_avail; |
| 1398 | } |
| 1399 | |
| 1400 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1401 | * log_create_ra - Fill a restart area from the values stored in @log. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1402 | */ |
| 1403 | static struct RESTART_AREA *log_create_ra(struct ntfs_log *log) |
| 1404 | { |
| 1405 | struct CLIENT_REC *cr; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1406 | struct RESTART_AREA *ra = kzalloc(log->restart_size, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1407 | |
| 1408 | if (!ra) |
| 1409 | return NULL; |
| 1410 | |
| 1411 | ra->current_lsn = cpu_to_le64(log->last_lsn); |
| 1412 | ra->log_clients = cpu_to_le16(1); |
| 1413 | ra->client_idx[1] = LFS_NO_CLIENT_LE; |
| 1414 | if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO) |
| 1415 | ra->flags = RESTART_SINGLE_PAGE_IO; |
| 1416 | ra->seq_num_bits = cpu_to_le32(log->seq_num_bits); |
| 1417 | ra->ra_len = cpu_to_le16(log->ra_size); |
| 1418 | ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients)); |
| 1419 | ra->l_size = cpu_to_le64(log->l_size); |
| 1420 | ra->rec_hdr_len = cpu_to_le16(log->record_header_len); |
| 1421 | ra->data_off = cpu_to_le16(log->data_off); |
| 1422 | ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1); |
| 1423 | |
| 1424 | cr = ra->clients; |
| 1425 | |
| 1426 | cr->prev_client = LFS_NO_CLIENT_LE; |
| 1427 | cr->next_client = LFS_NO_CLIENT_LE; |
| 1428 | |
| 1429 | return ra; |
| 1430 | } |
| 1431 | |
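/*
 * final_log_off - Compute the file offset of the last byte of a log record.
 *
 * Lsn values count 8-byte quadwords, so the record begins at byte
 * (lsn << 3), masked to the current wrap of the file. A record may
 * spill across several log pages, each of which contributes only
 * log->data_size usable bytes.
 */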
| 1432 | static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len) |
| 1433 | { |
| 1434 | u32 base_vbo = lsn << 3; |
| 1435 | u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask; |
| 1436 | u32 page_off = base_vbo & log->page_mask; |
| 1437 | u32 tail = log->page_size - page_off; |
| 1438 | |
| 1439 | page_off -= 1; |
| 1440 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1441 | /* Add the length of the header. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1442 | data_len += log->record_header_len; |
| 1443 | |
	/*
	 * If this lsn is contained in this log page, we are done.
	 * Otherwise we need to walk through several log pages.
	 */
| 1448 | if (data_len > tail) { |
| 1449 | data_len -= tail; |
| 1450 | tail = log->data_size; |
| 1451 | page_off = log->data_off - 1; |
| 1452 | |
| 1453 | for (;;) { |
| 1454 | final_log_off = next_page_off(log, final_log_off); |
| 1455 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1456 | /* |
| 1457 | * We are done if the remaining bytes |
| 1458 | * fit on this page. |
| 1459 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1460 | if (data_len <= tail) |
| 1461 | break; |
| 1462 | data_len -= tail; |
| 1463 | } |
| 1464 | } |
| 1465 | |
| 1466 | /* |
| 1467 | * We add the remaining bytes to our starting position on this page |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1468 | * and then add that value to the file offset of this log page. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1469 | */ |
| 1470 | return final_log_off + data_len + page_off; |
| 1471 | } |
| 1472 | |
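/*
 * next_log_lsn - Compute the lsn that follows the record described by @rh.
 *
 * On return *lsn is the next lsn, or 0 if the computed value falls
 * outside the legal range of the file (no more lsn's).
 */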
| 1473 | static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh, |
| 1474 | u64 *lsn) |
| 1475 | { |
| 1476 | int err; |
| 1477 | u64 this_lsn = le64_to_cpu(rh->this_lsn); |
| 1478 | u32 vbo = lsn_to_vbo(log, this_lsn); |
| 1479 | u32 end = |
| 1480 | final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len)); |
| 1481 | u32 hdr_off = end & ~log->sys_page_mask; |
| 1482 | u64 seq = this_lsn >> log->file_data_bits; |
| 1483 | struct RECORD_PAGE_HDR *page = NULL; |
| 1484 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1485 | /* Remember if we wrapped. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1486 | if (end <= vbo) |
| 1487 | seq += 1; |
| 1488 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1489 | /* Log page header for this page. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1490 | err = read_log_page(log, hdr_off, &page, NULL); |
| 1491 | if (err) |
| 1492 | return err; |
| 1493 | |
| 1494 | /* |
| 1495 | * If the lsn we were given was not the last lsn on this page, |
| 1496 | * then the starting offset for the next lsn is on a quad word |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1497 | * boundary following the last file offset for the current lsn. |
| 1498 | * Otherwise the file offset is the start of the data on the next page. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1499 | */ |
| 1500 | if (this_lsn == le64_to_cpu(page->rhdr.lsn)) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1501 | /* If we wrapped, we need to increment the sequence number. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1502 | hdr_off = next_page_off(log, hdr_off); |
| 1503 | if (hdr_off == log->first_page) |
| 1504 | seq += 1; |
| 1505 | |
| 1506 | vbo = hdr_off + log->data_off; |
| 1507 | } else { |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 1508 | vbo = ALIGN(end, 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1509 | } |
| 1510 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1511 | /* Compute the lsn based on the file offset and the sequence count. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1512 | *lsn = vbo_to_lsn(log, vbo, seq); |
| 1513 | |
	/*
	 * If this lsn is within the legal range for the file, we keep it.
	 * Otherwise zero the lsn to indicate that there are no more lsn's.
	 */
| 1518 | if (!is_lsn_in_file(log, *lsn)) |
| 1519 | *lsn = 0; |
| 1520 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1521 | kfree(page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1522 | |
| 1523 | return 0; |
| 1524 | } |
| 1525 | |
| 1526 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1527 | * current_log_avail - Calculate the number of bytes available for log records. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1528 | */ |
| 1529 | static u32 current_log_avail(struct ntfs_log *log) |
| 1530 | { |
| 1531 | u32 oldest_off, next_free_off, free_bytes; |
| 1532 | |
| 1533 | if (log->l_flags & NTFSLOG_NO_LAST_LSN) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1534 | /* The entire file is available. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1535 | return log->max_current_avail; |
| 1536 | } |
| 1537 | |
	/*
	 * If there is a last lsn in the restart area, then we know that
	 * we will have to compute the free range.
	 * If there is no oldest lsn, then start at the first page of the file.
	 */
| 1543 | oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) |
| 1544 | ? log->first_page |
| 1545 | : (log->oldest_lsn_off & ~log->sys_page_mask); |
| 1546 | |
| 1547 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1548 | * We will use the next log page offset to compute the next free page. |
| 1549 | * If we are going to reuse this page go to the next page. |
| 1550 | * If we are at the first page then use the end of the file. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1551 | */ |
| 1552 | next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) |
| 1553 | ? log->next_page + log->page_size |
| 1554 | : log->next_page == log->first_page |
| 1555 | ? log->l_size |
| 1556 | : log->next_page; |
| 1557 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1558 | /* If the two offsets are the same then there is no available space. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1559 | if (oldest_off == next_free_off) |
| 1560 | return 0; |
| 1561 | /* |
| 1562 | * If the free offset follows the oldest offset then subtract |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1563 | * this range from the total available pages. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1564 | */ |
| 1565 | free_bytes = |
| 1566 | oldest_off < next_free_off |
| 1567 | ? log->total_avail_pages - (next_free_off - oldest_off) |
| 1568 | : oldest_off - next_free_off; |
| 1569 | |
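	/* Convert the free bytes to whole pages, then to reservable bytes. */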
| 1570 | free_bytes >>= log->page_bits; |
| 1571 | return free_bytes * log->reserved; |
| 1572 | } |
| 1573 | |
| 1574 | static bool check_subseq_log_page(struct ntfs_log *log, |
| 1575 | const struct RECORD_PAGE_HDR *rp, u32 vbo, |
| 1576 | u64 seq) |
| 1577 | { |
| 1578 | u64 lsn_seq; |
| 1579 | const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr; |
| 1580 | u64 lsn = le64_to_cpu(rhdr->lsn); |
| 1581 | |
| 1582 | if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign) |
| 1583 | return false; |
| 1584 | |
	/*
	 * If the last lsn on the page was written after the page that
	 * caused the original error, then we have a fatal error.
	 */
| 1589 | lsn_seq = lsn >> log->file_data_bits; |
| 1590 | |
	/*
	 * If the sequence number for the lsn on the page is equal to or
	 * greater than the lsn we expect, then this is a subsequent write.
	 */
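	/*
	 * The second case below appears to cover a tail copy of the first
	 * log page: its lsn may still carry the previous sequence number
	 * while the copy itself sits at the first page offset.
	 */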
| 1595 | return lsn_seq >= seq || |
| 1596 | (lsn_seq == seq - 1 && log->first_page == vbo && |
| 1597 | vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask)); |
| 1598 | } |
| 1599 | |
| 1600 | /* |
| 1601 | * last_log_lsn |
| 1602 | * |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1603 | * Walks through the log pages for a file, searching for the |
| 1604 | * last log page written to the file. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1605 | */ |
| 1606 | static int last_log_lsn(struct ntfs_log *log) |
| 1607 | { |
| 1608 | int err; |
| 1609 | bool usa_error = false; |
| 1610 | bool replace_page = false; |
| 1611 | bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL; |
| 1612 | bool wrapped_file, wrapped; |
| 1613 | |
| 1614 | u32 page_cnt = 1, page_pos = 1; |
| 1615 | u32 page_off = 0, page_off1 = 0, saved_off = 0; |
| 1616 | u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0; |
| 1617 | u32 first_file_off = 0, second_file_off = 0; |
| 1618 | u32 part_io_count = 0; |
| 1619 | u32 tails = 0; |
| 1620 | u32 this_off, curpage_off, nextpage_off, remain_pages; |
| 1621 | |
| 1622 | u64 expected_seq, seq_base = 0, lsn_base = 0; |
| 1623 | u64 best_lsn, best_lsn1, best_lsn2; |
| 1624 | u64 lsn_cur, lsn1, lsn2; |
| 1625 | u64 last_ok_lsn = reuse_page ? log->last_lsn : 0; |
| 1626 | |
| 1627 | u16 cur_pos, best_page_pos; |
| 1628 | |
| 1629 | struct RECORD_PAGE_HDR *page = NULL; |
| 1630 | struct RECORD_PAGE_HDR *tst_page = NULL; |
| 1631 | struct RECORD_PAGE_HDR *first_tail = NULL; |
| 1632 | struct RECORD_PAGE_HDR *second_tail = NULL; |
| 1633 | struct RECORD_PAGE_HDR *tail_page = NULL; |
| 1634 | struct RECORD_PAGE_HDR *second_tail_prev = NULL; |
| 1635 | struct RECORD_PAGE_HDR *first_tail_prev = NULL; |
| 1636 | struct RECORD_PAGE_HDR *page_bufs = NULL; |
| 1637 | struct RECORD_PAGE_HDR *best_page; |
| 1638 | |
| 1639 | if (log->major_ver >= 2) { |
| 1640 | final_off = 0x02 * log->page_size; |
| 1641 | second_off = 0x12 * log->page_size; |
| 1642 | |
| 1643 | // 0x10 == 0x12 - 0x2 |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1644 | page_bufs = kmalloc(log->page_size * 0x10, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1645 | if (!page_bufs) |
| 1646 | return -ENOMEM; |
| 1647 | } else { |
| 1648 | second_off = log->first_page - log->page_size; |
| 1649 | final_off = second_off - log->page_size; |
| 1650 | } |
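	/*
	 * Tail copies live in two runs of 0x10 pages (at pages 0x02 and
	 * 0x12) for v2.0 logs, or in the two single pages just before the
	 * first data page for older logs.
	 */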
| 1651 | |
| 1652 | next_tail: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1653 | /* Read second tail page (at pos 3/0x12000). */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1654 | if (read_log_page(log, second_off, &second_tail, &usa_error) || |
| 1655 | usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1656 | kfree(second_tail); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1657 | second_tail = NULL; |
| 1658 | second_file_off = 0; |
| 1659 | lsn2 = 0; |
| 1660 | } else { |
| 1661 | second_file_off = hdr_file_off(log, second_tail); |
| 1662 | lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn); |
| 1663 | } |
| 1664 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1665 | /* Read first tail page (at pos 2/0x2000). */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1666 | if (read_log_page(log, final_off, &first_tail, &usa_error) || |
| 1667 | usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1668 | kfree(first_tail); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1669 | first_tail = NULL; |
| 1670 | first_file_off = 0; |
| 1671 | lsn1 = 0; |
| 1672 | } else { |
| 1673 | first_file_off = hdr_file_off(log, first_tail); |
| 1674 | lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn); |
| 1675 | } |
| 1676 | |
| 1677 | if (log->major_ver < 2) { |
| 1678 | int best_page; |
| 1679 | |
| 1680 | first_tail_prev = first_tail; |
| 1681 | final_off_prev = first_file_off; |
| 1682 | second_tail_prev = second_tail; |
| 1683 | second_off_prev = second_file_off; |
| 1684 | tails = 1; |
| 1685 | |
| 1686 | if (!first_tail && !second_tail) |
| 1687 | goto tail_read; |
| 1688 | |
| 1689 | if (first_tail && second_tail) |
| 1690 | best_page = lsn1 < lsn2 ? 1 : 0; |
| 1691 | else if (first_tail) |
| 1692 | best_page = 0; |
| 1693 | else |
| 1694 | best_page = 1; |
| 1695 | |
| 1696 | page_off = best_page ? second_file_off : first_file_off; |
| 1697 | seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits; |
| 1698 | goto tail_read; |
| 1699 | } |
| 1700 | |
| 1701 | best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0; |
| 1702 | best_lsn2 = |
| 1703 | second_tail ? base_lsn(log, second_tail, second_file_off) : 0; |
| 1704 | |
| 1705 | if (first_tail && second_tail) { |
| 1706 | if (best_lsn1 > best_lsn2) { |
| 1707 | best_lsn = best_lsn1; |
| 1708 | best_page = first_tail; |
| 1709 | this_off = first_file_off; |
| 1710 | } else { |
| 1711 | best_lsn = best_lsn2; |
| 1712 | best_page = second_tail; |
| 1713 | this_off = second_file_off; |
| 1714 | } |
| 1715 | } else if (first_tail) { |
| 1716 | best_lsn = best_lsn1; |
| 1717 | best_page = first_tail; |
| 1718 | this_off = first_file_off; |
| 1719 | } else if (second_tail) { |
| 1720 | best_lsn = best_lsn2; |
| 1721 | best_page = second_tail; |
| 1722 | this_off = second_file_off; |
| 1723 | } else { |
| 1724 | goto tail_read; |
| 1725 | } |
| 1726 | |
| 1727 | best_page_pos = le16_to_cpu(best_page->page_pos); |
| 1728 | |
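	/*
	 * Gather consecutive tail copies into page_bufs so a torn
	 * multi-page transfer can be replayed later; tails counts the
	 * pages collected so far.
	 */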
| 1729 | if (!tails) { |
| 1730 | if (best_page_pos == page_pos) { |
| 1731 | seq_base = best_lsn >> log->file_data_bits; |
| 1732 | saved_off = page_off = le32_to_cpu(best_page->file_off); |
| 1733 | lsn_base = best_lsn; |
| 1734 | |
| 1735 | memmove(page_bufs, best_page, log->page_size); |
| 1736 | |
| 1737 | page_cnt = le16_to_cpu(best_page->page_count); |
| 1738 | if (page_cnt > 1) |
| 1739 | page_pos += 1; |
| 1740 | |
| 1741 | tails = 1; |
| 1742 | } |
| 1743 | } else if (seq_base == (best_lsn >> log->file_data_bits) && |
| 1744 | saved_off + log->page_size == this_off && |
| 1745 | lsn_base < best_lsn && |
| 1746 | (page_pos != page_cnt || best_page_pos == page_pos || |
| 1747 | best_page_pos == 1) && |
| 1748 | (page_pos >= page_cnt || best_page_pos == page_pos)) { |
| 1749 | u16 bppc = le16_to_cpu(best_page->page_count); |
| 1750 | |
| 1751 | saved_off += log->page_size; |
| 1752 | lsn_base = best_lsn; |
| 1753 | |
| 1754 | memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page, |
| 1755 | log->page_size); |
| 1756 | |
| 1757 | tails += 1; |
| 1758 | |
| 1759 | if (best_page_pos != bppc) { |
| 1760 | page_cnt = bppc; |
| 1761 | page_pos = best_page_pos; |
| 1762 | |
| 1763 | if (page_cnt > 1) |
| 1764 | page_pos += 1; |
| 1765 | } else { |
| 1766 | page_pos = page_cnt = 1; |
| 1767 | } |
| 1768 | } else { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1769 | kfree(first_tail); |
| 1770 | kfree(second_tail); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1771 | goto tail_read; |
| 1772 | } |
| 1773 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1774 | kfree(first_tail_prev); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1775 | first_tail_prev = first_tail; |
| 1776 | final_off_prev = first_file_off; |
| 1777 | first_tail = NULL; |
| 1778 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 1779 | kfree(second_tail_prev); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1780 | second_tail_prev = second_tail; |
| 1781 | second_off_prev = second_file_off; |
| 1782 | second_tail = NULL; |
| 1783 | |
| 1784 | final_off += log->page_size; |
| 1785 | second_off += log->page_size; |
| 1786 | |
| 1787 | if (tails < 0x10) |
| 1788 | goto next_tail; |
| 1789 | tail_read: |
| 1790 | first_tail = first_tail_prev; |
| 1791 | final_off = final_off_prev; |
| 1792 | |
| 1793 | second_tail = second_tail_prev; |
| 1794 | second_off = second_off_prev; |
| 1795 | |
| 1796 | page_cnt = page_pos = 1; |
| 1797 | |
| 1798 | curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) |
| 1799 | : log->next_page; |
| 1800 | |
| 1801 | wrapped_file = |
| 1802 | curpage_off == log->first_page && |
| 1803 | !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL)); |
| 1804 | |
| 1805 | expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num; |
| 1806 | |
| 1807 | nextpage_off = curpage_off; |
| 1808 | |
| 1809 | next_page: |
| 1810 | tail_page = NULL; |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1811 | /* Read the next log page. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1812 | err = read_log_page(log, curpage_off, &page, &usa_error); |
| 1813 | |
	/* Compute the next log page offset in the file. */
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1815 | nextpage_off = next_page_off(log, curpage_off); |
| 1816 | wrapped = nextpage_off == log->first_page; |
| 1817 | |
| 1818 | if (tails > 1) { |
| 1819 | struct RECORD_PAGE_HDR *cur_page = |
| 1820 | Add2Ptr(page_bufs, curpage_off - page_off); |
| 1821 | |
| 1822 | if (curpage_off == saved_off) { |
| 1823 | tail_page = cur_page; |
| 1824 | goto use_tail_page; |
| 1825 | } |
| 1826 | |
| 1827 | if (page_off > curpage_off || curpage_off >= saved_off) |
| 1828 | goto use_tail_page; |
| 1829 | |
| 1830 | if (page_off1) |
| 1831 | goto use_cur_page; |
| 1832 | |
| 1833 | if (!err && !usa_error && |
| 1834 | page->rhdr.sign == NTFS_RCRD_SIGNATURE && |
| 1835 | cur_page->rhdr.lsn == page->rhdr.lsn && |
| 1836 | cur_page->record_hdr.next_record_off == |
| 1837 | page->record_hdr.next_record_off && |
| 1838 | ((page_pos == page_cnt && |
| 1839 | le16_to_cpu(page->page_pos) == 1) || |
| 1840 | (page_pos != page_cnt && |
| 1841 | le16_to_cpu(page->page_pos) == page_pos + 1 && |
| 1842 | le16_to_cpu(page->page_count) == page_cnt))) { |
| 1843 | cur_page = NULL; |
| 1844 | goto use_tail_page; |
| 1845 | } |
| 1846 | |
| 1847 | page_off1 = page_off; |
| 1848 | |
| 1849 | use_cur_page: |
| 1850 | |
| 1851 | lsn_cur = le64_to_cpu(cur_page->rhdr.lsn); |
| 1852 | |
| 1853 | if (last_ok_lsn != |
| 1854 | le64_to_cpu(cur_page->record_hdr.last_end_lsn) && |
| 1855 | ((lsn_cur >> log->file_data_bits) + |
| 1856 | ((curpage_off < |
| 1857 | (lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) |
| 1858 | ? 1 |
| 1859 | : 0)) != expected_seq) { |
| 1860 | goto check_tail; |
| 1861 | } |
| 1862 | |
| 1863 | if (!is_log_record_end(cur_page)) { |
| 1864 | tail_page = NULL; |
| 1865 | last_ok_lsn = lsn_cur; |
| 1866 | goto next_page_1; |
| 1867 | } |
| 1868 | |
| 1869 | log->seq_num = expected_seq; |
| 1870 | log->l_flags &= ~NTFSLOG_NO_LAST_LSN; |
| 1871 | log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn); |
| 1872 | log->ra->current_lsn = cur_page->record_hdr.last_end_lsn; |
| 1873 | |
| 1874 | if (log->record_header_len <= |
| 1875 | log->page_size - |
| 1876 | le16_to_cpu(cur_page->record_hdr.next_record_off)) { |
| 1877 | log->l_flags |= NTFSLOG_REUSE_TAIL; |
| 1878 | log->next_page = curpage_off; |
| 1879 | } else { |
| 1880 | log->l_flags &= ~NTFSLOG_REUSE_TAIL; |
| 1881 | log->next_page = nextpage_off; |
| 1882 | } |
| 1883 | |
| 1884 | if (wrapped_file) |
| 1885 | log->l_flags |= NTFSLOG_WRAPPED; |
| 1886 | |
| 1887 | last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn); |
| 1888 | goto next_page_1; |
| 1889 | } |
| 1890 | |
| 1891 | /* |
| 1892 | * If we are at the expected first page of a transfer check to see |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1893 | * if either tail copy is at this offset. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1894 | * If this page is the last page of a transfer, check if we wrote |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1895 | * a subsequent tail copy. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1896 | */ |
| 1897 | if (page_cnt == page_pos || page_cnt == page_pos + 1) { |
| 1898 | /* |
| 1899 | * Check if the offset matches either the first or second |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1900 | * tail copy. It is possible it will match both. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1901 | */ |
| 1902 | if (curpage_off == final_off) |
| 1903 | tail_page = first_tail; |
| 1904 | |
| 1905 | /* |
| 1906 | * If we already matched on the first page then |
| 1907 | * check the ending lsn's. |
| 1908 | */ |
| 1909 | if (curpage_off == second_off) { |
| 1910 | if (!tail_page || |
| 1911 | (second_tail && |
| 1912 | le64_to_cpu(second_tail->record_hdr.last_end_lsn) > |
| 1913 | le64_to_cpu(first_tail->record_hdr |
| 1914 | .last_end_lsn))) { |
| 1915 | tail_page = second_tail; |
| 1916 | } |
| 1917 | } |
| 1918 | } |
| 1919 | |
| 1920 | use_tail_page: |
| 1921 | if (tail_page) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1922 | /* We have a candidate for a tail copy. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1923 | lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn); |
| 1924 | |
| 1925 | if (last_ok_lsn < lsn_cur) { |
| 1926 | /* |
| 1927 | * If the sequence number is not expected, |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1928 | * then don't use the tail copy. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1929 | */ |
| 1930 | if (expected_seq != (lsn_cur >> log->file_data_bits)) |
| 1931 | tail_page = NULL; |
| 1932 | } else if (last_ok_lsn > lsn_cur) { |
| 1933 | /* |
| 1934 | * If the last lsn is greater than the one on |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1935 | * this page then forget this tail. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1936 | */ |
| 1937 | tail_page = NULL; |
| 1938 | } |
| 1939 | } |
| 1940 | |
	/*
	 * If we have an error on the current page,
	 * we will break out of this loop.
	 */
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1945 | if (err || usa_error) |
| 1946 | goto check_tail; |
| 1947 | |
| 1948 | /* |
| 1949 | * Done if the last lsn on this page doesn't match the previous known |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1950 | * last lsn or the sequence number is not expected. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1951 | */ |
| 1952 | lsn_cur = le64_to_cpu(page->rhdr.lsn); |
| 1953 | if (last_ok_lsn != lsn_cur && |
| 1954 | expected_seq != (lsn_cur >> log->file_data_bits)) { |
| 1955 | goto check_tail; |
| 1956 | } |
| 1957 | |
| 1958 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1959 | * Check that the page position and page count values are correct. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1960 | * If this is the first page of a transfer the position must be 1 |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1961 | * and the count will be unknown. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1962 | */ |
| 1963 | if (page_cnt == page_pos) { |
| 1964 | if (page->page_pos != cpu_to_le16(1) && |
| 1965 | (!reuse_page || page->page_pos != page->page_count)) { |
| 1966 | /* |
| 1967 | * If the current page is the first page we are |
| 1968 | * looking at and we are reusing this page then |
| 1969 | * it can be either the first or last page of a |
| 1970 | * transfer. Otherwise it can only be the first. |
| 1971 | */ |
| 1972 | goto check_tail; |
| 1973 | } |
| 1974 | } else if (le16_to_cpu(page->page_count) != page_cnt || |
| 1975 | le16_to_cpu(page->page_pos) != page_pos + 1) { |
| 1976 | /* |
| 1977 | * The page position better be 1 more than the last page |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1978 | * position and the page count better match. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1979 | */ |
| 1980 | goto check_tail; |
| 1981 | } |
| 1982 | |
	/*
	 * We have a valid page in the file and may have a valid page
	 * in the tail copy area.
	 * If the tail page was written after the page in the file, then
	 * break out of the loop.
	 */
| 1989 | if (tail_page && |
| 1990 | le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 1991 | /* Remember if we will replace the page. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 1992 | replace_page = true; |
| 1993 | goto check_tail; |
| 1994 | } |
| 1995 | |
| 1996 | tail_page = NULL; |
| 1997 | |
| 1998 | if (is_log_record_end(page)) { |
| 1999 | /* |
| 2000 | * Since we have read this page we know the sequence number |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2001 | * is the same as our expected value. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2002 | */ |
| 2003 | log->seq_num = expected_seq; |
| 2004 | log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn); |
| 2005 | log->ra->current_lsn = page->record_hdr.last_end_lsn; |
| 2006 | log->l_flags &= ~NTFSLOG_NO_LAST_LSN; |
| 2007 | |
| 2008 | /* |
| 2009 | * If there is room on this page for another header then |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2010 | * remember we want to reuse the page. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2011 | */ |
| 2012 | if (log->record_header_len <= |
| 2013 | log->page_size - |
| 2014 | le16_to_cpu(page->record_hdr.next_record_off)) { |
| 2015 | log->l_flags |= NTFSLOG_REUSE_TAIL; |
| 2016 | log->next_page = curpage_off; |
| 2017 | } else { |
| 2018 | log->l_flags &= ~NTFSLOG_REUSE_TAIL; |
| 2019 | log->next_page = nextpage_off; |
| 2020 | } |
| 2021 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2022 | /* Remember if we wrapped the log file. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2023 | if (wrapped_file) |
| 2024 | log->l_flags |= NTFSLOG_WRAPPED; |
| 2025 | } |
| 2026 | |
| 2027 | /* |
| 2028 | * Remember the last page count and position. |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2029 | * Also remember the last known lsn. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2030 | */ |
| 2031 | page_cnt = le16_to_cpu(page->page_count); |
| 2032 | page_pos = le16_to_cpu(page->page_pos); |
| 2033 | last_ok_lsn = le64_to_cpu(page->rhdr.lsn); |
| 2034 | |
| 2035 | next_page_1: |
| 2036 | |
| 2037 | if (wrapped) { |
| 2038 | expected_seq += 1; |
| 2039 | wrapped_file = 1; |
| 2040 | } |
| 2041 | |
| 2042 | curpage_off = nextpage_off; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2043 | kfree(page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2044 | page = NULL; |
| 2045 | reuse_page = 0; |
| 2046 | goto next_page; |
| 2047 | |
| 2048 | check_tail: |
| 2049 | if (tail_page) { |
| 2050 | log->seq_num = expected_seq; |
| 2051 | log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn); |
| 2052 | log->ra->current_lsn = tail_page->record_hdr.last_end_lsn; |
| 2053 | log->l_flags &= ~NTFSLOG_NO_LAST_LSN; |
| 2054 | |
| 2055 | if (log->page_size - |
| 2056 | le16_to_cpu( |
| 2057 | tail_page->record_hdr.next_record_off) >= |
| 2058 | log->record_header_len) { |
| 2059 | log->l_flags |= NTFSLOG_REUSE_TAIL; |
| 2060 | log->next_page = curpage_off; |
| 2061 | } else { |
| 2062 | log->l_flags &= ~NTFSLOG_REUSE_TAIL; |
| 2063 | log->next_page = nextpage_off; |
| 2064 | } |
| 2065 | |
| 2066 | if (wrapped) |
| 2067 | log->l_flags |= NTFSLOG_WRAPPED; |
| 2068 | } |
| 2069 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2070 | /* Remember that the partial IO will start at the next page. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2071 | second_off = nextpage_off; |
| 2072 | |
	/*
	 * If the next page is the first page of the file, then update
	 * the sequence number for log records which begin on the next page.
	 */
| 2077 | if (wrapped) |
| 2078 | expected_seq += 1; |
| 2079 | |
| 2080 | /* |
| 2081 | * If we have a tail copy or are performing single page I/O we can |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2082 | * immediately look at the next page. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2083 | */ |
| 2084 | if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) { |
| 2085 | page_cnt = 2; |
| 2086 | page_pos = 1; |
| 2087 | goto check_valid; |
| 2088 | } |
| 2089 | |
| 2090 | if (page_pos != page_cnt) |
| 2091 | goto check_valid; |
| 2092 | /* |
| 2093 | * If the next page causes us to wrap to the beginning of the log |
| 2094 | * file then we know which page to check next. |
| 2095 | */ |
| 2096 | if (wrapped) { |
| 2097 | page_cnt = 2; |
| 2098 | page_pos = 1; |
| 2099 | goto check_valid; |
| 2100 | } |
| 2101 | |
| 2102 | cur_pos = 2; |
| 2103 | |
| 2104 | next_test_page: |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2105 | kfree(tst_page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2106 | tst_page = NULL; |
| 2107 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2108 | /* Walk through the file, reading log pages. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2109 | err = read_log_page(log, nextpage_off, &tst_page, &usa_error); |
| 2110 | |
| 2111 | /* |
| 2112 | * If we get a USA error then assume that we correctly found |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2113 | * the end of the original transfer. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2114 | */ |
| 2115 | if (usa_error) |
| 2116 | goto file_is_valid; |
| 2117 | |
	/*
	 * If we were able to read the page, we examine it to see if it
	 * is the same or a different IO block.
	 */
| 2122 | if (err) |
| 2123 | goto next_test_page_1; |
| 2124 | |
| 2125 | if (le16_to_cpu(tst_page->page_pos) == cur_pos && |
| 2126 | check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) { |
| 2127 | page_cnt = le16_to_cpu(tst_page->page_count) + 1; |
| 2128 | page_pos = le16_to_cpu(tst_page->page_pos); |
| 2129 | goto check_valid; |
| 2130 | } else { |
| 2131 | goto file_is_valid; |
| 2132 | } |
| 2133 | |
| 2134 | next_test_page_1: |
| 2135 | |
| 2136 | nextpage_off = next_page_off(log, curpage_off); |
| 2137 | wrapped = nextpage_off == log->first_page; |
| 2138 | |
| 2139 | if (wrapped) { |
| 2140 | expected_seq += 1; |
| 2141 | page_cnt = 2; |
| 2142 | page_pos = 1; |
| 2143 | } |
| 2144 | |
| 2145 | cur_pos += 1; |
| 2146 | part_io_count += 1; |
| 2147 | if (!wrapped) |
| 2148 | goto next_test_page; |
| 2149 | |
| 2150 | check_valid: |
	/* Skip over the remaining pages in this transfer. */
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2152 | remain_pages = page_cnt - page_pos - 1; |
| 2153 | part_io_count += remain_pages; |
| 2154 | |
| 2155 | while (remain_pages--) { |
| 2156 | nextpage_off = next_page_off(log, curpage_off); |
| 2157 | wrapped = nextpage_off == log->first_page; |
| 2158 | |
| 2159 | if (wrapped) |
| 2160 | expected_seq += 1; |
| 2161 | } |
| 2162 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2163 | /* Call our routine to check this log page. */ |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2164 | kfree(tst_page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2165 | tst_page = NULL; |
| 2166 | |
| 2167 | err = read_log_page(log, nextpage_off, &tst_page, &usa_error); |
| 2168 | if (!err && !usa_error && |
| 2169 | check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) { |
| 2170 | err = -EINVAL; |
| 2171 | goto out; |
| 2172 | } |
| 2173 | |
| 2174 | file_is_valid: |
| 2175 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2176 | /* We have a valid file. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2177 | if (page_off1 || tail_page) { |
| 2178 | struct RECORD_PAGE_HDR *tmp_page; |
| 2179 | |
| 2180 | if (sb_rdonly(log->ni->mi.sbi->sb)) { |
| 2181 | err = -EROFS; |
| 2182 | goto out; |
| 2183 | } |
| 2184 | |
| 2185 | if (page_off1) { |
| 2186 | tmp_page = Add2Ptr(page_bufs, page_off1 - page_off); |
| 2187 | tails -= (page_off1 - page_off) / log->page_size; |
| 2188 | if (!tail_page) |
| 2189 | tails -= 1; |
| 2190 | } else { |
| 2191 | tmp_page = tail_page; |
| 2192 | tails = 1; |
| 2193 | } |
| 2194 | |
| 2195 | while (tails--) { |
| 2196 | u64 off = hdr_file_off(log, tmp_page); |
| 2197 | |
| 2198 | if (!page) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2199 | page = kmalloc(log->page_size, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2200 | if (!page) |
| 2201 | return -ENOMEM; |
| 2202 | } |
| 2203 | |
			/*
			 * Correct the page, copy the data from this page
			 * into it, and flush it to disk.
			 */
| 2208 | memcpy(page, tmp_page, log->page_size); |
| 2209 | |
			/* Fill the last flushed lsn value and flush the page. */
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2211 | if (log->major_ver < 2) |
| 2212 | page->rhdr.lsn = page->record_hdr.last_end_lsn; |
| 2213 | else |
| 2214 | page->file_off = 0; |
| 2215 | |
| 2216 | page->page_pos = page->page_count = cpu_to_le16(1); |
| 2217 | |
| 2218 | ntfs_fix_pre_write(&page->rhdr, log->page_size); |
| 2219 | |
| 2220 | err = ntfs_sb_write_run(log->ni->mi.sbi, |
| 2221 | &log->ni->file.run, off, page, |
| 2222 | log->page_size); |
| 2223 | |
| 2224 | if (err) |
| 2225 | goto out; |
| 2226 | |
| 2227 | if (part_io_count && second_off == off) { |
| 2228 | second_off += log->page_size; |
| 2229 | part_io_count -= 1; |
| 2230 | } |
| 2231 | |
| 2232 | tmp_page = Add2Ptr(tmp_page, log->page_size); |
| 2233 | } |
| 2234 | } |
| 2235 | |
| 2236 | if (part_io_count) { |
| 2237 | if (sb_rdonly(log->ni->mi.sbi->sb)) { |
| 2238 | err = -EROFS; |
| 2239 | goto out; |
| 2240 | } |
| 2241 | } |
| 2242 | |
| 2243 | out: |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2244 | kfree(second_tail); |
| 2245 | kfree(first_tail); |
| 2246 | kfree(page); |
| 2247 | kfree(tst_page); |
| 2248 | kfree(page_bufs); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2249 | |
| 2250 | return err; |
| 2251 | } |
| 2252 | |
| 2253 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2254 | * read_log_rec_buf - Copy a log record from the file to a buffer. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2255 | * |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2256 | * The log record may span several log pages and may even wrap the file. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2257 | */ |
| 2258 | static int read_log_rec_buf(struct ntfs_log *log, |
| 2259 | const struct LFS_RECORD_HDR *rh, void *buffer) |
| 2260 | { |
| 2261 | int err; |
| 2262 | struct RECORD_PAGE_HDR *ph = NULL; |
| 2263 | u64 lsn = le64_to_cpu(rh->this_lsn); |
| 2264 | u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask; |
| 2265 | u32 off = lsn_to_page_off(log, lsn) + log->record_header_len; |
| 2266 | u32 data_len = le32_to_cpu(rh->client_data_len); |
| 2267 | |
| 2268 | /* |
| 2269 | * While there are more bytes to transfer, |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2270 | * we continue to attempt to perform the read. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2271 | */ |
| 2272 | for (;;) { |
| 2273 | bool usa_error; |
| 2274 | u32 tail = log->page_size - off; |
| 2275 | |
| 2276 | if (tail >= data_len) |
| 2277 | tail = data_len; |
| 2278 | |
| 2279 | data_len -= tail; |
| 2280 | |
| 2281 | err = read_log_page(log, vbo, &ph, &usa_error); |
| 2282 | if (err) |
| 2283 | goto out; |
| 2284 | |
		/*
		 * The last lsn on this page better be greater than or equal
		 * to the lsn we are copying.
		 */
| 2289 | if (lsn > le64_to_cpu(ph->rhdr.lsn)) { |
| 2290 | err = -EINVAL; |
| 2291 | goto out; |
| 2292 | } |
| 2293 | |
| 2294 | memcpy(buffer, Add2Ptr(ph, off), tail); |
| 2295 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2296 | /* If there are no more bytes to transfer, we exit the loop. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2297 | if (!data_len) { |
| 2298 | if (!is_log_record_end(ph) || |
| 2299 | lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) { |
| 2300 | err = -EINVAL; |
| 2301 | goto out; |
| 2302 | } |
| 2303 | break; |
| 2304 | } |
| 2305 | |
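		/*
		 * More data remains to copy, so the record cannot end on
		 * this page and the page's last lsn must not precede ours.
		 */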
| 2306 | if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn || |
| 2307 | lsn > le64_to_cpu(ph->rhdr.lsn)) { |
| 2308 | err = -EINVAL; |
| 2309 | goto out; |
| 2310 | } |
| 2311 | |
| 2312 | vbo = next_page_off(log, vbo); |
| 2313 | off = log->data_off; |
| 2314 | |
		/*
		 * Adjust our pointer into the user's buffer to transfer
		 * the next block to.
		 */
| 2319 | buffer = Add2Ptr(buffer, tail); |
| 2320 | } |
| 2321 | |
| 2322 | out: |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2323 | kfree(ph); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2324 | return err; |
| 2325 | } |
| 2326 | |
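/*
 * read_rst_area - Read the client restart area referenced from @log.
 *
 * On success *rst_ holds a newly allocated copy of the NTFS_RESTART data
 * and *lsn is the lsn it was logged at; both stay zero when the client
 * has no restart area.
 */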
| 2327 | static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_, |
| 2328 | u64 *lsn) |
| 2329 | { |
| 2330 | int err; |
| 2331 | struct LFS_RECORD_HDR *rh = NULL; |
| 2332 | const struct CLIENT_REC *cr = |
| 2333 | Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)); |
| 2334 | u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn); |
| 2335 | u32 len; |
| 2336 | struct NTFS_RESTART *rst; |
| 2337 | |
| 2338 | *lsn = 0; |
| 2339 | *rst_ = NULL; |
| 2340 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2341 | /* If the client doesn't have a restart area, go ahead and exit now. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2342 | if (!lsnc) |
| 2343 | return 0; |
| 2344 | |
| 2345 | err = read_log_page(log, lsn_to_vbo(log, lsnc), |
| 2346 | (struct RECORD_PAGE_HDR **)&rh, NULL); |
| 2347 | if (err) |
| 2348 | return err; |
| 2349 | |
| 2350 | rst = NULL; |
| 2351 | lsnr = le64_to_cpu(rh->this_lsn); |
| 2352 | |
| 2353 | if (lsnc != lsnr) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2354 | /* If the lsn values don't match, then the disk is corrupt. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2355 | err = -EINVAL; |
| 2356 | goto out; |
| 2357 | } |
| 2358 | |
| 2359 | *lsn = lsnr; |
| 2360 | len = le32_to_cpu(rh->client_data_len); |
| 2361 | |
| 2362 | if (!len) { |
| 2363 | err = 0; |
| 2364 | goto out; |
| 2365 | } |
| 2366 | |
| 2367 | if (len < sizeof(struct NTFS_RESTART)) { |
| 2368 | err = -EINVAL; |
| 2369 | goto out; |
| 2370 | } |
| 2371 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2372 | rst = kmalloc(len, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2373 | if (!rst) { |
| 2374 | err = -ENOMEM; |
| 2375 | goto out; |
| 2376 | } |
| 2377 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2378 | /* Copy the data into the 'rst' buffer. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2379 | err = read_log_rec_buf(log, rh, rst); |
| 2380 | if (err) |
| 2381 | goto out; |
| 2382 | |
| 2383 | *rst_ = rst; |
| 2384 | rst = NULL; |
| 2385 | |
| 2386 | out: |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2387 | kfree(rh); |
| 2388 | kfree(rst); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2389 | |
| 2390 | return err; |
| 2391 | } |
| 2392 | |
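/*
 * find_log_rec - Read the log record at @lsn into @lcb.
 *
 * A record flagged LOG_RECORD_MULTI_PAGE is copied into a freshly
 * allocated buffer (lcb->alloc == true); otherwise lcb->log_rec points
 * directly into the page held in lcb->lrh.
 */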
| 2393 | static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb) |
| 2394 | { |
| 2395 | int err; |
| 2396 | struct LFS_RECORD_HDR *rh = lcb->lrh; |
| 2397 | u32 rec_len, len; |
| 2398 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2399 | /* Read the record header for this lsn. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2400 | if (!rh) { |
| 2401 | err = read_log_page(log, lsn_to_vbo(log, lsn), |
| 2402 | (struct RECORD_PAGE_HDR **)&rh, NULL); |
| 2403 | |
| 2404 | lcb->lrh = rh; |
| 2405 | if (err) |
| 2406 | return err; |
| 2407 | } |
| 2408 | |
	/*
	 * If the lsn of the log record doesn't match the desired
	 * lsn, then the disk is corrupt.
	 */
| 2413 | if (lsn != le64_to_cpu(rh->this_lsn)) |
| 2414 | return -EINVAL; |
| 2415 | |
| 2416 | len = le32_to_cpu(rh->client_data_len); |
| 2417 | |
	/*
	 * Check that the length field isn't greater than the total
	 * available space in the log file.
	 */
| 2422 | rec_len = len + log->record_header_len; |
| 2423 | if (rec_len >= log->total_avail) |
| 2424 | return -EINVAL; |
| 2425 | |
	/*
	 * If the entire log record is on this log page,
	 * put a pointer to the log record in the context block.
	 */
| 2430 | if (rh->flags & LOG_RECORD_MULTI_PAGE) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2431 | void *lr = kmalloc(len, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2432 | |
| 2433 | if (!lr) |
| 2434 | return -ENOMEM; |
| 2435 | |
| 2436 | lcb->log_rec = lr; |
| 2437 | lcb->alloc = true; |
| 2438 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2439 | /* Copy the data into the buffer returned. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2440 | err = read_log_rec_buf(log, rh, lr); |
| 2441 | if (err) |
| 2442 | return err; |
| 2443 | } else { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2444 | /* If beyond the end of the current page -> an error. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2445 | u32 page_off = lsn_to_page_off(log, lsn); |
| 2446 | |
| 2447 | if (page_off + len + log->record_header_len > log->page_size) |
| 2448 | return -EINVAL; |
| 2449 | |
| 2450 | lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR)); |
| 2451 | lcb->alloc = false; |
| 2452 | } |
| 2453 | |
| 2454 | return 0; |
| 2455 | } |
| 2456 | |
| 2457 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2458 | * read_log_rec_lcb - Init the query operation. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2459 | */ |
| 2460 | static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode, |
| 2461 | struct lcb **lcb_) |
| 2462 | { |
| 2463 | int err; |
| 2464 | const struct CLIENT_REC *cr; |
| 2465 | struct lcb *lcb; |
| 2466 | |
| 2467 | switch (ctx_mode) { |
| 2468 | case lcb_ctx_undo_next: |
| 2469 | case lcb_ctx_prev: |
| 2470 | case lcb_ctx_next: |
| 2471 | break; |
| 2472 | default: |
| 2473 | return -EINVAL; |
| 2474 | } |
| 2475 | |
	/* Check that the given lsn is in the legal range for this client. */
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2477 | cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)); |
| 2478 | |
| 2479 | if (!verify_client_lsn(log, cr, lsn)) |
| 2480 | return -EINVAL; |
| 2481 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2482 | lcb = kzalloc(sizeof(struct lcb), GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2483 | if (!lcb) |
| 2484 | return -ENOMEM; |
| 2485 | lcb->client = log->client_id; |
| 2486 | lcb->ctx_mode = ctx_mode; |
| 2487 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2488 | /* Find the log record indicated by the given lsn. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2489 | err = find_log_rec(log, lsn, lcb); |
| 2490 | if (err) |
| 2491 | goto out; |
| 2492 | |
| 2493 | *lcb_ = lcb; |
| 2494 | return 0; |
| 2495 | |
| 2496 | out: |
| 2497 | lcb_put(lcb); |
| 2498 | *lcb_ = NULL; |
| 2499 | return err; |
| 2500 | } |
| 2501 | |
| 2502 | /* |
| 2503 | * find_client_next_lsn |
| 2504 | * |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2505 | * Attempt to find the next lsn to return to a client based on the context mode. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2506 | */ |
| 2507 | static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn) |
| 2508 | { |
| 2509 | int err; |
| 2510 | u64 next_lsn; |
| 2511 | struct LFS_RECORD_HDR *hdr; |
| 2512 | |
| 2513 | hdr = lcb->lrh; |
| 2514 | *lsn = 0; |
| 2515 | |
| 2516 | if (lcb_ctx_next != lcb->ctx_mode) |
| 2517 | goto check_undo_next; |
| 2518 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2519 | /* Loop as long as another lsn can be found. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2520 | for (;;) { |
| 2521 | u64 current_lsn; |
| 2522 | |
| 2523 | err = next_log_lsn(log, hdr, ¤t_lsn); |
| 2524 | if (err) |
| 2525 | goto out; |
| 2526 | |
| 2527 | if (!current_lsn) |
| 2528 | break; |
| 2529 | |
| 2530 | if (hdr != lcb->lrh) |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2531 | kfree(hdr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2532 | |
| 2533 | hdr = NULL; |
| 2534 | err = read_log_page(log, lsn_to_vbo(log, current_lsn), |
| 2535 | (struct RECORD_PAGE_HDR **)&hdr, NULL); |
| 2536 | if (err) |
| 2537 | goto out; |
| 2538 | |
| 2539 | if (memcmp(&hdr->client, &lcb->client, |
| 2540 | sizeof(struct CLIENT_ID))) { |
| 2541 | /* err = -EINVAL; */ |
| 2542 | } else if (LfsClientRecord == hdr->record_type) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2543 | kfree(lcb->lrh); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2544 | lcb->lrh = hdr; |
| 2545 | *lsn = current_lsn; |
| 2546 | return 0; |
| 2547 | } |
| 2548 | } |
| 2549 | |
| 2550 | out: |
| 2551 | if (hdr != lcb->lrh) |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2552 | kfree(hdr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2553 | return err; |
| 2554 | |
| 2555 | check_undo_next: |
| 2556 | if (lcb_ctx_undo_next == lcb->ctx_mode) |
| 2557 | next_lsn = le64_to_cpu(hdr->client_undo_next_lsn); |
| 2558 | else if (lcb_ctx_prev == lcb->ctx_mode) |
| 2559 | next_lsn = le64_to_cpu(hdr->client_prev_lsn); |
| 2560 | else |
| 2561 | return 0; |
| 2562 | |
| 2563 | if (!next_lsn) |
| 2564 | return 0; |
| 2565 | |
| 2566 | if (!verify_client_lsn( |
| 2567 | log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)), |
| 2568 | next_lsn)) |
| 2569 | return 0; |
| 2570 | |
| 2571 | hdr = NULL; |
| 2572 | err = read_log_page(log, lsn_to_vbo(log, next_lsn), |
| 2573 | (struct RECORD_PAGE_HDR **)&hdr, NULL); |
| 2574 | if (err) |
| 2575 | return err; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2576 | kfree(lcb->lrh); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2577 | lcb->lrh = hdr; |
| 2578 | |
| 2579 | *lsn = next_lsn; |
| 2580 | |
| 2581 | return 0; |
| 2582 | } |
| 2583 | |
| 2584 | static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn) |
| 2585 | { |
| 2586 | int err; |
| 2587 | |
| 2588 | err = find_client_next_lsn(log, lcb, lsn); |
| 2589 | if (err) |
| 2590 | return err; |
| 2591 | |
| 2592 | if (!*lsn) |
| 2593 | return 0; |
| 2594 | |
| 2595 | if (lcb->alloc) |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2596 | kfree(lcb->log_rec); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2597 | |
| 2598 | lcb->log_rec = NULL; |
| 2599 | lcb->alloc = false; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 2600 | kfree(lcb->lrh); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2601 | lcb->lrh = NULL; |
| 2602 | |
| 2603 | return find_log_rec(log, *lsn, lcb); |
| 2604 | } |
| 2605 | |
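| | /* |
| |  * check_index_header - Sanity-check an INDEX_HDR against 'bytes' of data. |
| |  * |
| |  * The offsets must satisfy de_off + min_de <= used <= total <= bytes, |
| |  * and every entry must be at least min_de bytes and carry the same |
| |  * NTFS_IE_HAS_SUBNODES flag as the header itself. |
| |  */ |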
| 2606 | static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes) |
| 2607 | { |
| 2608 | __le16 mask; |
| 2609 | u32 min_de, de_off, used, total; |
| 2610 | const struct NTFS_DE *e; |
| 2611 | |
| 2612 | if (hdr_has_subnode(hdr)) { |
| 2613 | min_de = sizeof(struct NTFS_DE) + sizeof(u64); |
| 2614 | mask = NTFS_IE_HAS_SUBNODES; |
| 2615 | } else { |
| 2616 | min_de = sizeof(struct NTFS_DE); |
| 2617 | mask = 0; |
| 2618 | } |
| 2619 | |
| 2620 | de_off = le32_to_cpu(hdr->de_off); |
| 2621 | used = le32_to_cpu(hdr->used); |
| 2622 | total = le32_to_cpu(hdr->total); |
| 2623 | |
| 2624 | if (de_off > bytes - min_de || used > bytes || total > bytes || |
| 2625 | de_off + min_de > used || used > total) { |
| 2626 | return false; |
| 2627 | } |
| 2628 | |
| 2629 | e = Add2Ptr(hdr, de_off); |
| 2630 | for (;;) { |
| 2631 | u16 esize = le16_to_cpu(e->size); |
| 2632 | struct NTFS_DE *next = Add2Ptr(e, esize); |
| 2633 | |
| 2634 | if (esize < min_de || PtrOffset(hdr, next) > used || |
| 2635 | (e->flags & NTFS_IE_HAS_SUBNODES) != mask) { |
| 2636 | return false; |
| 2637 | } |
| 2638 | |
| 2639 | if (de_is_last(e)) |
| 2640 | break; |
| 2641 | |
| 2642 | e = next; |
| 2643 | } |
| 2644 | |
| 2645 | return true; |
| 2646 | } |
| 2647 | |
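| | /* |
| |  * check_index_buffer - Sanity-check an INDX record of 'bytes' bytes. |
| |  * |
| |  * Such a record spans bytes / SECTOR_SIZE sectors and thus needs |
| |  * (bytes >> SECTOR_SHIFT) + 1 fixup words, all of which must fit in |
| |  * the first sector; fix_num must account for exactly that many. |
| |  */ |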
| 2648 | static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes) |
| 2649 | { |
| 2650 | u16 fo; |
| 2651 | const struct NTFS_RECORD_HEADER *r = &ib->rhdr; |
| 2652 | |
| 2653 | if (r->sign != NTFS_INDX_SIGNATURE) |
| 2654 | return false; |
| 2655 | |
| 2656 | fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short)); |
| 2657 | |
| 2658 | if (le16_to_cpu(r->fix_off) > fo) |
| 2659 | return false; |
| 2660 | |
| 2661 | if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes) |
| 2662 | return false; |
| 2663 | |
| 2664 | return check_index_header(&ib->ihdr, |
| 2665 | bytes - offsetof(struct INDEX_BUFFER, ihdr)); |
| 2666 | } |
| 2667 | |
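| | /* |
| |  * check_index_root - Sanity-check the $INDEX_ROOT attribute. |
| |  * |
| |  * index_block_clst must be a power of two (1..0x80) that agrees with |
| |  * index_block_size: blocks of at least one cluster are measured in |
| |  * clusters, smaller ones in 512-byte sectors. |
| |  */ |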
| 2668 | static inline bool check_index_root(const struct ATTRIB *attr, |
| 2669 | struct ntfs_sb_info *sbi) |
| 2670 | { |
| 2671 | bool ret; |
| 2672 | const struct INDEX_ROOT *root = resident_data(attr); |
| 2673 | u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size |
| 2674 | ? sbi->cluster_bits |
| 2675 | : SECTOR_SHIFT; |
| 2676 | u8 block_clst = root->index_block_clst; |
| 2677 | |
| 2678 | if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) || |
| 2679 | (root->type != ATTR_NAME && root->type != ATTR_ZERO) || |
| 2680 | (root->type == ATTR_NAME && |
| 2681 | root->rule != NTFS_COLLATION_TYPE_FILENAME) || |
| 2682 | (le32_to_cpu(root->index_block_size) != |
| 2683 | (block_clst << index_bits)) || |
| 2684 | (block_clst != 1 && block_clst != 2 && block_clst != 4 && |
| 2685 | block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 && |
| 2686 | block_clst != 0x40 && block_clst != 0x80)) { |
| 2687 | return false; |
| 2688 | } |
| 2689 | |
| 2690 | ret = check_index_header(&root->ihdr, |
| 2691 | le32_to_cpu(attr->res.data_size) - |
| 2692 | offsetof(struct INDEX_ROOT, ihdr)); |
| 2693 | return ret; |
| 2694 | } |
| 2695 | |
| 2696 | static inline bool check_attr(const struct MFT_REC *rec, |
| 2697 | const struct ATTRIB *attr, |
| 2698 | struct ntfs_sb_info *sbi) |
| 2699 | { |
| 2700 | u32 asize = le32_to_cpu(attr->size); |
| 2701 | u32 rsize = 0; |
| 2702 | u64 dsize, svcn, evcn; |
| 2703 | u16 run_off; |
| 2704 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2705 | /* Check the fixed part of the attribute record header. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2706 | if (asize >= sbi->record_size || |
| 2707 | asize + PtrOffset(rec, attr) >= sbi->record_size || |
| 2708 | (attr->name_len && |
| 2709 | le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) > |
| 2710 | asize)) { |
| 2711 | return false; |
| 2712 | } |
| 2713 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2714 | /* Check the attribute fields. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2715 | switch (attr->non_res) { |
| 2716 | case 0: |
| 2717 | rsize = le32_to_cpu(attr->res.data_size); |
| 2718 | if (rsize >= asize || |
| 2719 | le16_to_cpu(attr->res.data_off) + rsize > asize) { |
| 2720 | return false; |
| 2721 | } |
| 2722 | break; |
| 2723 | |
| 2724 | case 1: |
| 2725 | dsize = le64_to_cpu(attr->nres.data_size); |
| 2726 | svcn = le64_to_cpu(attr->nres.svcn); |
| 2727 | evcn = le64_to_cpu(attr->nres.evcn); |
| 2728 | run_off = le16_to_cpu(attr->nres.run_off); |
| 2729 | |
| 2730 | if (svcn > evcn + 1 || run_off >= asize || |
| 2731 | le64_to_cpu(attr->nres.valid_size) > dsize || |
| 2732 | dsize > le64_to_cpu(attr->nres.alloc_size)) { |
| 2733 | return false; |
| 2734 | } |
| 2735 | |
| 2736 | if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn, |
| 2737 | Add2Ptr(attr, run_off), asize - run_off) < 0) { |
| 2738 | return false; |
| 2739 | } |
| 2740 | |
| 2741 | return true; |
| 2742 | |
| 2743 | default: |
| 2744 | return false; |
| 2745 | } |
| 2746 | |
| 2747 | switch (attr->type) { |
| 2748 | case ATTR_NAME: |
| 2749 | if (fname_full_size(Add2Ptr( |
| 2750 | attr, le16_to_cpu(attr->res.data_off))) > asize) { |
| 2751 | return false; |
| 2752 | } |
| 2753 | break; |
| 2754 | |
| 2755 | case ATTR_ROOT: |
| 2756 | return check_index_root(attr, sbi); |
| 2757 | |
| 2758 | case ATTR_STD: |
| 2759 | if (rsize < sizeof(struct ATTR_STD_INFO5) && |
| 2760 | rsize != sizeof(struct ATTR_STD_INFO)) { |
| 2761 | return false; |
| 2762 | } |
| 2763 | break; |
| 2764 | |
| 2765 | case ATTR_LIST: |
| 2766 | case ATTR_ID: |
| 2767 | case ATTR_SECURE: |
| 2768 | case ATTR_LABEL: |
| 2769 | case ATTR_VOL_INFO: |
| 2770 | case ATTR_DATA: |
| 2771 | case ATTR_ALLOC: |
| 2772 | case ATTR_BITMAP: |
| 2773 | case ATTR_REPARSE: |
| 2774 | case ATTR_EA_INFO: |
| 2775 | case ATTR_EA: |
| 2776 | case ATTR_PROPERTYSET: |
| 2777 | case ATTR_LOGGED_UTILITY_STREAM: |
| 2778 | break; |
| 2779 | |
| 2780 | default: |
| 2781 | return false; |
| 2782 | } |
| 2783 | |
| 2784 | return true; |
| 2785 | } |
| 2786 | |
| 2787 | static inline bool check_file_record(const struct MFT_REC *rec, |
| 2788 | const struct MFT_REC *rec2, |
| 2789 | struct ntfs_sb_info *sbi) |
| 2790 | { |
| 2791 | const struct ATTRIB *attr; |
| 2792 | u16 fo = le16_to_cpu(rec->rhdr.fix_off); |
| 2793 | u16 fn = le16_to_cpu(rec->rhdr.fix_num); |
| 2794 | u16 ao = le16_to_cpu(rec->attr_off); |
| 2795 | u32 rs = sbi->record_size; |
| 2796 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2797 | /* Check the file record header for consistency. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2798 | if (rec->rhdr.sign != NTFS_FILE_SIGNATURE || |
| 2799 | fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) || |
| 2800 | (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 || |
| 2801 | ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) || |
| 2802 | le32_to_cpu(rec->total) != rs) { |
| 2803 | return false; |
| 2804 | } |
| 2805 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2806 | /* Loop to check all of the attributes. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2807 | for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END; |
| 2808 | attr = Add2Ptr(attr, le32_to_cpu(attr->size))) { |
| 2809 | if (check_attr(rec, attr, sbi)) |
| 2810 | continue; |
| 2811 | return false; |
| 2812 | } |
| 2813 | |
| 2814 | return true; |
| 2815 | } |
| 2816 | |
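| | /* |
| |  * check_lsn - Decide whether a logged update must still be applied. |
| |  * |
| |  * Return: true if 'rlsn' is NULL (undo pass) or if the on-disk record |
| |  * is older than *rlsn, i.e. the logged update has not reached the |
| |  * disk yet; false for hole-marked pages or already-applied updates. |
| |  */ |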
| 2817 | static inline bool check_lsn(const struct NTFS_RECORD_HEADER *hdr, |
| 2818 | const u64 *rlsn) |
| 2819 | { |
| 2820 | u64 lsn; |
| 2821 | |
| 2822 | if (!rlsn) |
| 2823 | return true; |
| 2824 | |
| 2825 | lsn = le64_to_cpu(hdr->lsn); |
| 2826 | |
| 2827 | if (hdr->sign == NTFS_HOLE_SIGNATURE) |
| 2828 | return false; |
| 2829 | |
| 2830 | if (*rlsn > lsn) |
| 2831 | return true; |
| 2832 | |
| 2833 | return false; |
| 2834 | } |
| 2835 | |
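| | /* |
| |  * The check_if_* helpers below share one pattern: walk the |
| |  * variable-sized items from the start of the structure and verify |
| |  * that the offset stored in the log record lands exactly on an item |
| |  * boundary. |
| |  */ |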
| 2836 | static inline bool check_if_attr(const struct MFT_REC *rec, |
| 2837 | const struct LOG_REC_HDR *lrh) |
| 2838 | { |
| 2839 | u16 ro = le16_to_cpu(lrh->record_off); |
| 2840 | u16 o = le16_to_cpu(rec->attr_off); |
| 2841 | const struct ATTRIB *attr = Add2Ptr(rec, o); |
| 2842 | |
| 2843 | while (o < ro) { |
| 2844 | u32 asize; |
| 2845 | |
| 2846 | if (attr->type == ATTR_END) |
| 2847 | break; |
| 2848 | |
| 2849 | asize = le32_to_cpu(attr->size); |
| 2850 | if (!asize) |
| 2851 | break; |
| 2852 | |
| 2853 | o += asize; |
| 2854 | attr = Add2Ptr(attr, asize); |
| 2855 | } |
| 2856 | |
| 2857 | return o == ro; |
| 2858 | } |
| 2859 | |
| 2860 | static inline bool check_if_index_root(const struct MFT_REC *rec, |
| 2861 | const struct LOG_REC_HDR *lrh) |
| 2862 | { |
| 2863 | u16 ro = le16_to_cpu(lrh->record_off); |
| 2864 | u16 o = le16_to_cpu(rec->attr_off); |
| 2865 | const struct ATTRIB *attr = Add2Ptr(rec, o); |
| 2866 | |
| 2867 | while (o < ro) { |
| 2868 | u32 asize; |
| 2869 | |
| 2870 | if (attr->type == ATTR_END) |
| 2871 | break; |
| 2872 | |
| 2873 | asize = le32_to_cpu(attr->size); |
| 2874 | if (!asize) |
| 2875 | break; |
| 2876 | |
| 2877 | o += asize; |
| 2878 | attr = Add2Ptr(attr, asize); |
| 2879 | } |
| 2880 | |
| 2881 | return o == ro && attr->type == ATTR_ROOT; |
| 2882 | } |
| 2883 | |
| 2884 | static inline bool check_if_root_index(const struct ATTRIB *attr, |
| 2885 | const struct INDEX_HDR *hdr, |
| 2886 | const struct LOG_REC_HDR *lrh) |
| 2887 | { |
| 2888 | u16 ao = le16_to_cpu(lrh->attr_off); |
| 2889 | u32 de_off = le32_to_cpu(hdr->de_off); |
| 2890 | u32 o = PtrOffset(attr, hdr) + de_off; |
| 2891 | const struct NTFS_DE *e = Add2Ptr(hdr, de_off); |
| 2892 | u32 asize = le32_to_cpu(attr->size); |
| 2893 | |
| 2894 | while (o < ao) { |
| 2895 | u16 esize; |
| 2896 | |
| 2897 | if (o >= asize) |
| 2898 | break; |
| 2899 | |
| 2900 | esize = le16_to_cpu(e->size); |
| 2901 | if (!esize) |
| 2902 | break; |
| 2903 | |
| 2904 | o += esize; |
| 2905 | e = Add2Ptr(e, esize); |
| 2906 | } |
| 2907 | |
| 2908 | return o == ao; |
| 2909 | } |
| 2910 | |
| 2911 | static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr, |
| 2912 | u32 attr_off) |
| 2913 | { |
| 2914 | u32 de_off = le32_to_cpu(hdr->de_off); |
| 2915 | u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off; |
| 2916 | const struct NTFS_DE *e = Add2Ptr(hdr, de_off); |
| 2917 | u32 used = le32_to_cpu(hdr->used); |
| 2918 | |
| 2919 | while (o < attr_off) { |
| 2920 | u16 esize; |
| 2921 | |
| 2922 | if (de_off >= used) |
| 2923 | break; |
| 2924 | |
| 2925 | esize = le16_to_cpu(e->size); |
| 2926 | if (!esize) |
| 2927 | break; |
| 2928 | |
| 2929 | o += esize; |
| 2930 | de_off += esize; |
| 2931 | e = Add2Ptr(e, esize); |
| 2932 | } |
| 2933 | |
| 2934 | return o == attr_off; |
| 2935 | } |
| 2936 | |
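| | /* |
| |  * change_attr_size - Resize 'attr' in place to 'nsize' bytes. |
| |  * |
| |  * The tail of the MFT record is shifted by nsize - asize; the caller |
| |  * must already have checked that the record has room to grow. |
| |  */ |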
| 2937 | static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr, |
| 2938 | u32 nsize) |
| 2939 | { |
| 2940 | u32 asize = le32_to_cpu(attr->size); |
| 2941 | int dsize = nsize - asize; |
| 2942 | u8 *next = Add2Ptr(attr, asize); |
| 2943 | u32 used = le32_to_cpu(rec->used); |
| 2944 | |
| 2945 | memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next)); |
| 2946 | |
| 2947 | rec->used = cpu_to_le32(used + dsize); |
| 2948 | attr->size = cpu_to_le32(nsize); |
| 2949 | } |
| 2950 | |
| 2951 | struct OpenAttr { |
| 2952 | struct ATTRIB *attr; |
| 2953 | struct runs_tree *run1; |
| 2954 | struct runs_tree run0; |
| 2955 | struct ntfs_inode *ni; |
| 2956 | // CLST rno; |
| 2957 | }; |
| 2958 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 2959 | /* |
| 2960 | * cmp_type_and_name |
| 2961 | * |
| 2962 | * Return: 0 if 'a1' and 'a2' have the same type and name. |
| 2963 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2964 | static inline int cmp_type_and_name(const struct ATTRIB *a1, |
| 2965 | const struct ATTRIB *a2) |
| 2966 | { |
| 2967 | return a1->type != a2->type || a1->name_len != a2->name_len || |
| 2968 | (a1->name_len && memcmp(attr_name(a1), attr_name(a2), |
| 2969 | a1->name_len * sizeof(short))); |
| 2970 | } |
| 2971 | |
| 2972 | static struct OpenAttr *find_loaded_attr(struct ntfs_log *log, |
| 2973 | const struct ATTRIB *attr, CLST rno) |
| 2974 | { |
| 2975 | struct OPEN_ATTR_ENRTY *oe = NULL; |
| 2976 | |
| 2977 | while ((oe = enum_rstbl(log->open_attr_tbl, oe))) { |
| 2978 | struct OpenAttr *op_attr; |
| 2979 | |
| 2980 | if (ino_get(&oe->ref) != rno) |
| 2981 | continue; |
| 2982 | |
| 2983 | op_attr = (struct OpenAttr *)oe->ptr; |
| 2984 | if (!cmp_type_and_name(op_attr->attr, attr)) |
| 2985 | return op_attr; |
| 2986 | } |
| 2987 | return NULL; |
| 2988 | } |
| 2989 | |
| 2990 | static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi, |
| 2991 | enum ATTR_TYPE type, u64 size, |
| 2992 | const u16 *name, size_t name_len, |
| 2993 | __le16 flags) |
| 2994 | { |
| 2995 | struct ATTRIB *attr; |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 2996 | u32 name_size = ALIGN(name_len * sizeof(short), 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 2997 | bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED); |
| 2998 | u32 asize = name_size + |
| 2999 | (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT); |
| 3000 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3001 | attr = kzalloc(asize, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3002 | if (!attr) |
| 3003 | return NULL; |
| 3004 | |
| 3005 | attr->type = type; |
| 3006 | attr->size = cpu_to_le32(asize); |
| 3007 | attr->flags = flags; |
| 3008 | attr->non_res = 1; |
| 3009 | attr->name_len = name_len; |
| 3010 | |
| 3011 | attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1); |
| 3012 | attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size)); |
| 3013 | attr->nres.data_size = cpu_to_le64(size); |
| 3014 | attr->nres.valid_size = attr->nres.data_size; |
| 3015 | if (is_ext) { |
| 3016 | attr->name_off = SIZEOF_NONRESIDENT_EX_LE; |
| 3017 | if (is_attr_compressed(attr)) |
| 3018 | attr->nres.c_unit = COMPRESSION_UNIT; |
| 3019 | |
| 3020 | attr->nres.run_off = |
| 3021 | cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size); |
| 3022 | memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name, |
| 3023 | name_len * sizeof(short)); |
| 3024 | } else { |
| 3025 | attr->name_off = SIZEOF_NONRESIDENT_LE; |
| 3026 | attr->nres.run_off = |
| 3027 | cpu_to_le16(SIZEOF_NONRESIDENT + name_size); |
| 3028 | memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name, |
| 3029 | name_len * sizeof(short)); |
| 3030 | } |
| 3031 | |
| 3032 | return attr; |
| 3033 | } |
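| | |
| | /* |
| |  * For illustration only (values assumed, not taken from the original |
| |  * source): on a volume with 4K clusters, rebuilding an unnamed, |
| |  * non-sparse, non-compressed 16K stream |
| |  * |
| |  *	attr = attr_create_nonres_log(sbi, ATTR_DATA, 0x4000, NULL, 0, 0); |
| |  * |
| |  * yields evcn == 3, data_size == alloc_size == 0x4000 and |
| |  * attr->size == SIZEOF_NONRESIDENT. |
| |  */ |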
| 3034 | |
| 3035 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3036 | * do_action - Common routine for the Redo and Undo Passes. |
| 3037 | * @rlsn: The redo lsn to apply; if it is NULL then this is the undo pass. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3038 | */ |
| 3039 | static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe, |
| 3040 | const struct LOG_REC_HDR *lrh, u32 op, void *data, |
| 3041 | u32 dlen, u32 rec_len, const u64 *rlsn) |
| 3042 | { |
| 3043 | int err = 0; |
| 3044 | struct ntfs_sb_info *sbi = log->ni->mi.sbi; |
| 3045 | struct inode *inode = NULL, *inode_parent; |
| 3046 | struct mft_inode *mi = NULL, *mi2_child = NULL; |
| 3047 | CLST rno = 0, rno_base = 0; |
| 3048 | struct INDEX_BUFFER *ib = NULL; |
| 3049 | struct MFT_REC *rec = NULL; |
| 3050 | struct ATTRIB *attr = NULL, *attr2; |
| 3051 | struct INDEX_HDR *hdr; |
| 3052 | struct INDEX_ROOT *root; |
| 3053 | struct NTFS_DE *e, *e1, *e2; |
| 3054 | struct NEW_ATTRIBUTE_SIZES *new_sz; |
| 3055 | struct ATTR_FILE_NAME *fname; |
| 3056 | struct OpenAttr *oa, *oa2; |
| 3057 | u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits; |
| 3058 | u16 id, id2; |
| 3059 | u32 record_size = sbi->record_size; |
| 3060 | u64 t64; |
| 3061 | u16 roff = le16_to_cpu(lrh->record_off); |
| 3062 | u16 aoff = le16_to_cpu(lrh->attr_off); |
| 3063 | u64 lco = 0; |
| 3064 | u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT; |
| 3065 | u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits; |
| 3066 | u64 vbo = cbo + tvo; |
| 3067 | void *buffer_le = NULL; |
| 3068 | u32 bytes = 0; |
| 3069 | bool a_dirty = false; |
| 3070 | u16 data_off; |
| 3071 | |
| 3072 | oa = oe->ptr; |
| 3073 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3074 | /* Big switch to prepare. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3075 | switch (op) { |
| 3076 | /* ============================================================ |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3077 | * Process MFT records, as described by the current log record. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3078 | * ============================================================ |
| 3079 | */ |
| 3080 | case InitializeFileRecordSegment: |
| 3081 | case DeallocateFileRecordSegment: |
| 3082 | case WriteEndOfFileRecordSegment: |
| 3083 | case CreateAttribute: |
| 3084 | case DeleteAttribute: |
| 3085 | case UpdateResidentValue: |
| 3086 | case UpdateMappingPairs: |
| 3087 | case SetNewAttributeSizes: |
| 3088 | case AddIndexEntryRoot: |
| 3089 | case DeleteIndexEntryRoot: |
| 3090 | case SetIndexEntryVcnRoot: |
| 3091 | case UpdateFileNameRoot: |
| 3092 | case UpdateRecordDataRoot: |
| 3093 | case ZeroEndOfFileRecord: |
| 3094 | rno = vbo >> sbi->record_bits; |
| 3095 | inode = ilookup(sbi->sb, rno); |
| 3096 | if (inode) { |
| 3097 | mi = &ntfs_i(inode)->mi; |
| 3098 | } else if (op == InitializeFileRecordSegment) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3099 | mi = kzalloc(sizeof(struct mft_inode), GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3100 | if (!mi) |
| 3101 | return -ENOMEM; |
| 3102 | err = mi_format_new(mi, sbi, rno, 0, false); |
| 3103 | if (err) |
| 3104 | goto out; |
| 3105 | } else { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3106 | /* Read from disk. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3107 | err = mi_get(sbi, rno, &mi); |
| 3108 | if (err) |
| 3109 | return err; |
| 3110 | } |
| 3111 | rec = mi->mrec; |
| 3112 | |
| 3113 | if (op == DeallocateFileRecordSegment) |
| 3114 | goto skip_load_parent; |
| 3115 | |
| 3116 | if (InitializeFileRecordSegment != op) { |
| 3117 | if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE) |
| 3118 | goto dirty_vol; |
| 3119 | if (!check_lsn(&rec->rhdr, rlsn)) |
| 3120 | goto out; |
| 3121 | if (!check_file_record(rec, NULL, sbi)) |
| 3122 | goto dirty_vol; |
| 3123 | attr = Add2Ptr(rec, roff); |
| 3124 | } |
| 3125 | |
| 3126 | if (is_rec_base(rec) || InitializeFileRecordSegment == op) { |
| 3127 | rno_base = rno; |
| 3128 | goto skip_load_parent; |
| 3129 | } |
| 3130 | |
| 3131 | rno_base = ino_get(&rec->parent_ref); |
| 3132 | inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL); |
| 3133 | if (IS_ERR(inode_parent)) |
| 3134 | goto skip_load_parent; |
| 3135 | |
| 3136 | if (is_bad_inode(inode_parent)) { |
| 3137 | iput(inode_parent); |
| 3138 | goto skip_load_parent; |
| 3139 | } |
| 3140 | |
| 3141 | if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) { |
| 3142 | iput(inode_parent); |
| 3143 | } else { |
| 3144 | if (mi2_child->mrec != mi->mrec) |
| 3145 | memcpy(mi2_child->mrec, mi->mrec, |
| 3146 | sbi->record_size); |
| 3147 | |
| 3148 | if (inode) |
| 3149 | iput(inode); |
| 3150 | else if (mi) |
| 3151 | mi_put(mi); |
| 3152 | |
| 3153 | inode = inode_parent; |
| 3154 | mi = mi2_child; |
| 3155 | rec = mi2_child->mrec; |
| 3156 | attr = Add2Ptr(rec, roff); |
| 3157 | } |
| 3158 | |
| 3159 | skip_load_parent: |
| 3160 | inode_parent = NULL; |
| 3161 | break; |
| 3162 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3163 | /* |
| 3164 | * Process attributes, as described by the current log record. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3165 | */ |
| 3166 | case UpdateNonresidentValue: |
| 3167 | case AddIndexEntryAllocation: |
| 3168 | case DeleteIndexEntryAllocation: |
| 3169 | case WriteEndOfIndexBuffer: |
| 3170 | case SetIndexEntryVcnAllocation: |
| 3171 | case UpdateFileNameAllocation: |
| 3172 | case SetBitsInNonresidentBitMap: |
| 3173 | case ClearBitsInNonresidentBitMap: |
| 3174 | case UpdateRecordDataAllocation: |
| 3175 | attr = oa->attr; |
| 3176 | bytes = UpdateNonresidentValue == op ? dlen : 0; |
| 3177 | lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits; |
| 3178 | |
| 3179 | if (attr->type == ATTR_ALLOC) { |
| 3180 | t32 = le32_to_cpu(oe->bytes_per_index); |
| 3181 | if (bytes < t32) |
| 3182 | bytes = t32; |
| 3183 | } |
| 3184 | |
| 3185 | if (!bytes) |
| 3186 | bytes = lco - cbo; |
| 3187 | |
| 3188 | bytes += roff; |
| 3189 | if (attr->type == ATTR_ALLOC) |
| 3190 | bytes = (bytes + 511) & ~511; // Align up to the sector size (512 bytes). |
| 3191 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3192 | buffer_le = kmalloc(bytes, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3193 | if (!buffer_le) |
| 3194 | return -ENOMEM; |
| 3195 | |
| 3196 | err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes, |
| 3197 | NULL); |
| 3198 | if (err) |
| 3199 | goto out; |
| 3200 | |
| 3201 | if (attr->type == ATTR_ALLOC && *(int *)buffer_le) |
| 3202 | ntfs_fix_post_read(buffer_le, bytes, false); |
| 3203 | break; |
| 3204 | |
| 3205 | default: |
| 3206 | WARN_ON(1); |
| 3207 | } |
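| | |
| | /* |
| |  * At this point the target is loaded: the MFT-record cases above have |
| |  * set up 'mi', 'rec' and 'attr', while the non-resident cases hold |
| |  * 'bytes' of the attribute stream in 'buffer_le' (read from byte |
| |  * offset 'vbo'), with the logged structure at offset 'roff' in it. |
| |  */ |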
| 3208 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3209 | /* Big switch to do the operation. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3210 | switch (op) { |
| 3211 | case InitializeFileRecordSegment: |
| 3212 | if (roff + dlen > record_size) |
| 3213 | goto dirty_vol; |
| 3214 | |
| 3215 | memcpy(Add2Ptr(rec, roff), data, dlen); |
| 3216 | mi->dirty = true; |
| 3217 | break; |
| 3218 | |
| 3219 | case DeallocateFileRecordSegment: |
| 3220 | clear_rec_inuse(rec); |
| 3221 | le16_add_cpu(&rec->seq, 1); |
| 3222 | mi->dirty = true; |
| 3223 | break; |
| 3224 | |
| 3225 | case WriteEndOfFileRecordSegment: |
| 3226 | attr2 = (struct ATTRIB *)data; |
| 3227 | if (!check_if_attr(rec, lrh) || roff + dlen > record_size) |
| 3228 | goto dirty_vol; |
| 3229 | |
| 3230 | memmove(attr, attr2, dlen); |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 3231 | rec->used = cpu_to_le32(ALIGN(roff + dlen, 8)); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3232 | |
| 3233 | mi->dirty = true; |
| 3234 | break; |
| 3235 | |
| 3236 | case CreateAttribute: |
| 3237 | attr2 = (struct ATTRIB *)data; |
| 3238 | asize = le32_to_cpu(attr2->size); |
| 3239 | used = le32_to_cpu(rec->used); |
| 3240 | |
| 3241 | if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT || |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 3242 | !IS_ALIGNED(asize, 8) || |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3243 | Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) || |
| 3244 | dlen > record_size - used) { |
| 3245 | goto dirty_vol; |
| 3246 | } |
| 3247 | |
| 3248 | memmove(Add2Ptr(attr, asize), attr, used - roff); |
| 3249 | memcpy(attr, attr2, asize); |
| 3250 | |
| 3251 | rec->used = cpu_to_le32(used + asize); |
| 3252 | id = le16_to_cpu(rec->next_attr_id); |
| 3253 | id2 = le16_to_cpu(attr2->id); |
| 3254 | if (id <= id2) |
| 3255 | rec->next_attr_id = cpu_to_le16(id2 + 1); |
| 3256 | if (is_attr_indexed(attr)) |
| 3257 | le16_add_cpu(&rec->hard_links, 1); |
| 3258 | |
| 3259 | oa2 = find_loaded_attr(log, attr, rno_base); |
| 3260 | if (oa2) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3261 | void *p2 = kmemdup(attr, le32_to_cpu(attr->size), |
| 3262 | GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3263 | if (p2) { |
| 3264 | // run_close(oa2->run1); |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3265 | kfree(oa2->attr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3266 | oa2->attr = p2; |
| 3267 | } |
| 3268 | } |
| 3269 | |
| 3270 | mi->dirty = true; |
| 3271 | break; |
| 3272 | |
| 3273 | case DeleteAttribute: |
| 3274 | asize = le32_to_cpu(attr->size); |
| 3275 | used = le32_to_cpu(rec->used); |
| 3276 | |
| 3277 | if (!check_if_attr(rec, lrh)) |
| 3278 | goto dirty_vol; |
| 3279 | |
| 3280 | rec->used = cpu_to_le32(used - asize); |
| 3281 | if (is_attr_indexed(attr)) |
| 3282 | le16_add_cpu(&rec->hard_links, -1); |
| 3283 | |
| 3284 | memmove(attr, Add2Ptr(attr, asize), used - asize - roff); |
| 3285 | |
| 3286 | mi->dirty = true; |
| 3287 | break; |
| 3288 | |
| 3289 | case UpdateResidentValue: |
| 3290 | nsize = aoff + dlen; |
| 3291 | |
| 3292 | if (!check_if_attr(rec, lrh)) |
| 3293 | goto dirty_vol; |
| 3294 | |
| 3295 | asize = le32_to_cpu(attr->size); |
| 3296 | used = le32_to_cpu(rec->used); |
| 3297 | |
| 3298 | if (lrh->redo_len == lrh->undo_len) { |
| 3299 | if (nsize > asize) |
| 3300 | goto dirty_vol; |
| 3301 | goto move_data; |
| 3302 | } |
| 3303 | |
| 3304 | if (nsize > asize && nsize - asize > record_size - used) |
| 3305 | goto dirty_vol; |
| 3306 | |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 3307 | nsize = ALIGN(nsize, 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3308 | data_off = le16_to_cpu(attr->res.data_off); |
| 3309 | |
| 3310 | if (nsize < asize) { |
| 3311 | memmove(Add2Ptr(attr, aoff), data, dlen); |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3312 | data = NULL; // To skip the memmove() below. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3313 | } |
| 3314 | |
| 3315 | memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize), |
| 3316 | used - le16_to_cpu(lrh->record_off) - asize); |
| 3317 | |
| 3318 | rec->used = cpu_to_le32(used + nsize - asize); |
| 3319 | attr->size = cpu_to_le32(nsize); |
| 3320 | attr->res.data_size = cpu_to_le32(aoff + dlen - data_off); |
| 3321 | |
| 3322 | move_data: |
| 3323 | if (data) |
| 3324 | memmove(Add2Ptr(attr, aoff), data, dlen); |
| 3325 | |
| 3326 | oa2 = find_loaded_attr(log, attr, rno_base); |
| 3327 | if (oa2) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3328 | void *p2 = kmemdup(attr, le32_to_cpu(attr->size), |
| 3329 | GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3330 | if (p2) { |
| 3331 | // run_close(&oa2->run0); |
| 3332 | oa2->run1 = &oa2->run0; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3333 | kfree(oa2->attr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3334 | oa2->attr = p2; |
| 3335 | } |
| 3336 | } |
| 3337 | |
| 3338 | mi->dirty = true; |
| 3339 | break; |
| 3340 | |
| 3341 | case UpdateMappingPairs: |
| 3342 | nsize = aoff + dlen; |
| 3343 | asize = le32_to_cpu(attr->size); |
| 3344 | used = le32_to_cpu(rec->used); |
| 3345 | |
| 3346 | if (!check_if_attr(rec, lrh) || !attr->non_res || |
| 3347 | aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize || |
| 3348 | (nsize > asize && nsize - asize > record_size - used)) { |
| 3349 | goto dirty_vol; |
| 3350 | } |
| 3351 | |
Kari Argillander | fa3cacf | 2021-08-26 11:56:29 +0300 | [diff] [blame] | 3352 | nsize = ALIGN(nsize, 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3353 | |
| 3354 | memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize), |
| 3355 | used - le16_to_cpu(lrh->record_off) - asize); |
| 3356 | rec->used = cpu_to_le32(used + nsize - asize); |
| 3357 | attr->size = cpu_to_le32(nsize); |
| 3358 | memmove(Add2Ptr(attr, aoff), data, dlen); |
| 3359 | |
| 3360 | if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn), |
| 3361 | attr_run(attr), &t64)) { |
| 3362 | goto dirty_vol; |
| 3363 | } |
| 3364 | |
| 3365 | attr->nres.evcn = cpu_to_le64(t64); |
| 3366 | oa2 = find_loaded_attr(log, attr, rno_base); |
| 3367 | if (oa2 && oa2->attr->non_res) |
| 3368 | oa2->attr->nres.evcn = attr->nres.evcn; |
| 3369 | |
| 3370 | mi->dirty = true; |
| 3371 | break; |
| 3372 | |
| 3373 | case SetNewAttributeSizes: |
| 3374 | new_sz = data; |
| 3375 | if (!check_if_attr(rec, lrh) || !attr->non_res) |
| 3376 | goto dirty_vol; |
| 3377 | |
| 3378 | attr->nres.alloc_size = new_sz->alloc_size; |
| 3379 | attr->nres.data_size = new_sz->data_size; |
| 3380 | attr->nres.valid_size = new_sz->valid_size; |
| 3381 | |
| 3382 | if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES)) |
| 3383 | attr->nres.total_size = new_sz->total_size; |
| 3384 | |
| 3385 | oa2 = find_loaded_attr(log, attr, rno_base); |
| 3386 | if (oa2) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3387 | void *p2 = kmemdup(attr, le32_to_cpu(attr->size), |
| 3388 | GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3389 | if (p2) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3390 | kfree(oa2->attr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3391 | oa2->attr = p2; |
| 3392 | } |
| 3393 | } |
| 3394 | mi->dirty = true; |
| 3395 | break; |
| 3396 | |
| 3397 | case AddIndexEntryRoot: |
| 3398 | e = (struct NTFS_DE *)data; |
| 3399 | esize = le16_to_cpu(e->size); |
| 3400 | root = resident_data(attr); |
| 3401 | hdr = &root->ihdr; |
| 3402 | used = le32_to_cpu(hdr->used); |
| 3403 | |
| 3404 | if (!check_if_index_root(rec, lrh) || |
| 3405 | !check_if_root_index(attr, hdr, lrh) || |
| 3406 | Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) || |
| 3407 | esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) { |
| 3408 | goto dirty_vol; |
| 3409 | } |
| 3410 | |
| 3411 | e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); |
| 3412 | |
| 3413 | change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize); |
| 3414 | |
| 3415 | memmove(Add2Ptr(e1, esize), e1, |
| 3416 | PtrOffset(e1, Add2Ptr(hdr, used))); |
| 3417 | memmove(e1, e, esize); |
| 3418 | |
| 3419 | le32_add_cpu(&attr->res.data_size, esize); |
| 3420 | hdr->used = cpu_to_le32(used + esize); |
| 3421 | le32_add_cpu(&hdr->total, esize); |
| 3422 | |
| 3423 | mi->dirty = true; |
| 3424 | break; |
| 3425 | |
| 3426 | case DeleteIndexEntryRoot: |
| 3427 | root = resident_data(attr); |
| 3428 | hdr = &root->ihdr; |
| 3429 | used = le32_to_cpu(hdr->used); |
| 3430 | |
| 3431 | if (!check_if_index_root(rec, lrh) || |
| 3432 | !check_if_root_index(attr, hdr, lrh)) { |
| 3433 | goto dirty_vol; |
| 3434 | } |
| 3435 | |
| 3436 | e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); |
| 3437 | esize = le16_to_cpu(e1->size); |
| 3438 | e2 = Add2Ptr(e1, esize); |
| 3439 | |
| 3440 | memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used))); |
| 3441 | |
| 3442 | le32_sub_cpu(&attr->res.data_size, esize); |
| 3443 | hdr->used = cpu_to_le32(used - esize); |
| 3444 | le32_sub_cpu(&hdr->total, esize); |
| 3445 | |
| 3446 | change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize); |
| 3447 | |
| 3448 | mi->dirty = true; |
| 3449 | break; |
| 3450 | |
| 3451 | case SetIndexEntryVcnRoot: |
| 3452 | root = resident_data(attr); |
| 3453 | hdr = &root->ihdr; |
| 3454 | |
| 3455 | if (!check_if_index_root(rec, lrh) || |
| 3456 | !check_if_root_index(attr, hdr, lrh)) { |
| 3457 | goto dirty_vol; |
| 3458 | } |
| 3459 | |
| 3460 | e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); |
| 3461 | |
| 3462 | de_set_vbn_le(e, *(__le64 *)data); |
| 3463 | mi->dirty = true; |
| 3464 | break; |
| 3465 | |
| 3466 | case UpdateFileNameRoot: |
| 3467 | root = resident_data(attr); |
| 3468 | hdr = &root->ihdr; |
| 3469 | |
| 3470 | if (!check_if_index_root(rec, lrh) || |
| 3471 | !check_if_root_index(attr, hdr, lrh)) { |
| 3472 | goto dirty_vol; |
| 3473 | } |
| 3474 | |
| 3475 | e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); |
| 3476 | fname = (struct ATTR_FILE_NAME *)(e + 1); |
| 3477 | memmove(&fname->dup, data, sizeof(fname->dup)); |
| 3478 | mi->dirty = true; |
| 3479 | break; |
| 3480 | |
| 3481 | case UpdateRecordDataRoot: |
| 3482 | root = resident_data(attr); |
| 3483 | hdr = &root->ihdr; |
| 3484 | |
| 3485 | if (!check_if_index_root(rec, lrh) || |
| 3486 | !check_if_root_index(attr, hdr, lrh)) { |
| 3487 | goto dirty_vol; |
| 3488 | } |
| 3489 | |
| 3490 | e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); |
| 3491 | |
| 3492 | memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen); |
| 3493 | |
| 3494 | mi->dirty = true; |
| 3495 | break; |
| 3496 | |
| 3497 | case ZeroEndOfFileRecord: |
| 3498 | if (roff + dlen > record_size) |
| 3499 | goto dirty_vol; |
| 3500 | |
| 3501 | memset(attr, 0, dlen); |
| 3502 | mi->dirty = true; |
| 3503 | break; |
| 3504 | |
| 3505 | case UpdateNonresidentValue: |
| 3506 | if (lco < cbo + roff + dlen) |
| 3507 | goto dirty_vol; |
| 3508 | |
| 3509 | memcpy(Add2Ptr(buffer_le, roff), data, dlen); |
| 3510 | |
| 3511 | a_dirty = true; |
| 3512 | if (attr->type == ATTR_ALLOC) |
| 3513 | ntfs_fix_pre_write(buffer_le, bytes); |
| 3514 | break; |
| 3515 | |
| 3516 | case AddIndexEntryAllocation: |
| 3517 | ib = Add2Ptr(buffer_le, roff); |
| 3518 | hdr = &ib->ihdr; |
| 3519 | e = data; |
| 3520 | esize = le16_to_cpu(e->size); |
| 3521 | e1 = Add2Ptr(ib, aoff); |
| 3522 | |
| 3523 | if (is_baad(&ib->rhdr)) |
| 3524 | goto dirty_vol; |
| 3525 | if (!check_lsn(&ib->rhdr, rlsn)) |
| 3526 | goto out; |
| 3527 | |
| 3528 | used = le32_to_cpu(hdr->used); |
| 3529 | |
| 3530 | if (!check_index_buffer(ib, bytes) || |
| 3531 | !check_if_alloc_index(hdr, aoff) || |
| 3532 | Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) || |
| 3533 | used + esize > le32_to_cpu(hdr->total)) { |
| 3534 | goto dirty_vol; |
| 3535 | } |
| 3536 | |
| 3537 | memmove(Add2Ptr(e1, esize), e1, |
| 3538 | PtrOffset(e1, Add2Ptr(hdr, used))); |
| 3539 | memcpy(e1, e, esize); |
| 3540 | |
| 3541 | hdr->used = cpu_to_le32(used + esize); |
| 3542 | |
| 3543 | a_dirty = true; |
| 3544 | |
| 3545 | ntfs_fix_pre_write(&ib->rhdr, bytes); |
| 3546 | break; |
| 3547 | |
| 3548 | case DeleteIndexEntryAllocation: |
| 3549 | ib = Add2Ptr(buffer_le, roff); |
| 3550 | hdr = &ib->ihdr; |
| 3551 | e = Add2Ptr(ib, aoff); |
| 3552 | esize = le16_to_cpu(e->size); |
| 3553 | |
| 3554 | if (is_baad(&ib->rhdr)) |
| 3555 | goto dirty_vol; |
| 3556 | if (!check_lsn(&ib->rhdr, rlsn)) |
| 3557 | goto out; |
| 3558 | |
| 3559 | if (!check_index_buffer(ib, bytes) || |
| 3560 | !check_if_alloc_index(hdr, aoff)) { |
| 3561 | goto dirty_vol; |
| 3562 | } |
| 3563 | |
| 3564 | e1 = Add2Ptr(e, esize); |
| 3565 | nsize = esize; |
| 3566 | used = le32_to_cpu(hdr->used); |
| 3567 | |
| 3568 | memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used))); |
| 3569 | |
| 3570 | hdr->used = cpu_to_le32(used - nsize); |
| 3571 | |
| 3572 | a_dirty = true; |
| 3573 | |
| 3574 | ntfs_fix_pre_write(&ib->rhdr, bytes); |
| 3575 | break; |
| 3576 | |
| 3577 | case WriteEndOfIndexBuffer: |
| 3578 | ib = Add2Ptr(buffer_le, roff); |
| 3579 | hdr = &ib->ihdr; |
| 3580 | e = Add2Ptr(ib, aoff); |
| 3581 | |
| 3582 | if (is_baad(&ib->rhdr)) |
| 3583 | goto dirty_vol; |
| 3584 | if (!check_lsn(&ib->rhdr, rlsn)) |
| 3585 | goto out; |
| 3586 | if (!check_index_buffer(ib, bytes) || |
| 3587 | !check_if_alloc_index(hdr, aoff) || |
| 3588 | aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) + |
| 3589 | le32_to_cpu(hdr->total)) { |
| 3590 | goto dirty_vol; |
| 3591 | } |
| 3592 | |
| 3593 | hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e)); |
| 3594 | memmove(e, data, dlen); |
| 3595 | |
| 3596 | a_dirty = true; |
| 3597 | ntfs_fix_pre_write(&ib->rhdr, bytes); |
| 3598 | break; |
| 3599 | |
| 3600 | case SetIndexEntryVcnAllocation: |
| 3601 | ib = Add2Ptr(buffer_le, roff); |
| 3602 | hdr = &ib->ihdr; |
| 3603 | e = Add2Ptr(ib, aoff); |
| 3604 | |
| 3605 | if (is_baad(&ib->rhdr)) |
| 3606 | goto dirty_vol; |
| 3607 | |
| 3608 | if (!check_lsn(&ib->rhdr, rlsn)) |
| 3609 | goto out; |
| 3610 | if (!check_index_buffer(ib, bytes) || |
| 3611 | !check_if_alloc_index(hdr, aoff)) { |
| 3612 | goto dirty_vol; |
| 3613 | } |
| 3614 | |
| 3615 | de_set_vbn_le(e, *(__le64 *)data); |
| 3616 | |
| 3617 | a_dirty = true; |
| 3618 | ntfs_fix_pre_write(&ib->rhdr, bytes); |
| 3619 | break; |
| 3620 | |
| 3621 | case UpdateFileNameAllocation: |
| 3622 | ib = Add2Ptr(buffer_le, roff); |
| 3623 | hdr = &ib->ihdr; |
| 3624 | e = Add2Ptr(ib, aoff); |
| 3625 | |
| 3626 | if (is_baad(&ib->rhdr)) |
| 3627 | goto dirty_vol; |
| 3628 | |
| 3629 | if (!check_lsn(&ib->rhdr, rlsn)) |
| 3630 | goto out; |
| 3631 | if (!check_index_buffer(ib, bytes) || |
| 3632 | !check_if_alloc_index(hdr, aoff)) { |
| 3633 | goto dirty_vol; |
| 3634 | } |
| 3635 | |
| 3636 | fname = (struct ATTR_FILE_NAME *)(e + 1); |
| 3637 | memmove(&fname->dup, data, sizeof(fname->dup)); |
| 3638 | |
| 3639 | a_dirty = true; |
| 3640 | ntfs_fix_pre_write(&ib->rhdr, bytes); |
| 3641 | break; |
| 3642 | |
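| | /* |
| |  * The two bitmap cases below patch bits inside the non-resident |
| |  * bitmap read into 'buffer_le' above; 'data' is a BITMAP_RANGE and |
| |  * the bounds checks keep the touched bytes inside the clusters this |
| |  * record actually logged ('lco'). |
| |  */ |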
| 3643 | case SetBitsInNonresidentBitMap: |
| 3644 | bmp_off = |
| 3645 | le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off); |
| 3646 | bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits); |
| 3647 | |
| 3648 | if (cbo + (bmp_off + 7) / 8 > lco || |
| 3649 | cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) { |
| 3650 | goto dirty_vol; |
| 3651 | } |
| 3652 | |
| 3653 | __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits); |
| 3654 | a_dirty = true; |
| 3655 | break; |
| 3656 | |
| 3657 | case ClearBitsInNonresidentBitMap: |
| 3658 | bmp_off = |
| 3659 | le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off); |
| 3660 | bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits); |
| 3661 | |
| 3662 | if (cbo + (bmp_off + 7) / 8 > lco || |
| 3663 | cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) { |
| 3664 | goto dirty_vol; |
| 3665 | } |
| 3666 | |
| 3667 | __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits); |
| 3668 | a_dirty = true; |
| 3669 | break; |
| 3670 | |
| 3671 | case UpdateRecordDataAllocation: |
| 3672 | ib = Add2Ptr(buffer_le, roff); |
| 3673 | hdr = &ib->ihdr; |
| 3674 | e = Add2Ptr(ib, aoff); |
| 3675 | |
| 3676 | if (is_baad(&ib->rhdr)) |
| 3677 | goto dirty_vol; |
| 3678 | |
| 3679 | if (!check_lsn(&ib->rhdr, rlsn)) |
| 3680 | goto out; |
| 3681 | if (!check_index_buffer(ib, bytes) || |
| 3682 | !check_if_alloc_index(hdr, aoff)) { |
| 3683 | goto dirty_vol; |
| 3684 | } |
| 3685 | |
| 3686 | memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen); |
| 3687 | |
| 3688 | a_dirty = true; |
| 3689 | ntfs_fix_pre_write(&ib->rhdr, bytes); |
| 3690 | break; |
| 3691 | |
| 3692 | default: |
| 3693 | WARN_ON(1); |
| 3694 | } |
| 3695 | |
| 3696 | if (rlsn) { |
| 3697 | __le64 t64 = cpu_to_le64(*rlsn); |
| 3698 | |
| 3699 | if (rec) |
| 3700 | rec->rhdr.lsn = t64; |
| 3701 | if (ib) |
| 3702 | ib->rhdr.lsn = t64; |
| 3703 | } |
| 3704 | |
| 3705 | if (mi && mi->dirty) { |
| 3706 | err = mi_write(mi, 0); |
| 3707 | if (err) |
| 3708 | goto out; |
| 3709 | } |
| 3710 | |
| 3711 | if (a_dirty) { |
| 3712 | attr = oa->attr; |
| 3713 | err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes); |
| 3714 | if (err) |
| 3715 | goto out; |
| 3716 | } |
| 3717 | |
| 3718 | out: |
| 3719 | |
| 3720 | if (inode) |
| 3721 | iput(inode); |
| 3722 | else if (mi != mi2_child) |
| 3723 | mi_put(mi); |
| 3724 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3725 | kfree(buffer_le); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3726 | |
| 3727 | return err; |
| 3728 | |
| 3729 | dirty_vol: |
| 3730 | log->set_dirty = true; |
| 3731 | goto out; |
| 3732 | } |
| 3733 | |
| 3734 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3735 | * log_replay - Replays log and empties it. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3736 | * |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3737 | * This function is called during the mount operation. |
| 3738 | * It replays the log and empties it. |
| 3739 | * '*initialized' is set to false if the log file is filled with -1 (never initialized). |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3740 | */ |
| 3741 | int log_replay(struct ntfs_inode *ni, bool *initialized) |
| 3742 | { |
| 3743 | int err; |
| 3744 | struct ntfs_sb_info *sbi = ni->mi.sbi; |
| 3745 | struct ntfs_log *log; |
| 3746 | |
| 3747 | struct restart_info rst_info, rst_info2; |
| 3748 | u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0; |
| 3749 | struct ATTR_NAME_ENTRY *attr_names = NULL; |
| 3750 | struct ATTR_NAME_ENTRY *ane; |
| 3751 | struct RESTART_TABLE *dptbl = NULL; |
| 3752 | struct RESTART_TABLE *trtbl = NULL; |
| 3753 | const struct RESTART_TABLE *rt; |
| 3754 | struct RESTART_TABLE *oatbl = NULL; |
| 3755 | struct inode *inode; |
| 3756 | struct OpenAttr *oa; |
| 3757 | struct ntfs_inode *ni_oe; |
| 3758 | struct ATTRIB *attr = NULL; |
| 3759 | u64 size, vcn, undo_next_lsn; |
| 3760 | CLST rno, lcn, lcn0, len0, clen; |
| 3761 | void *data; |
| 3762 | struct NTFS_RESTART *rst = NULL; |
| 3763 | struct lcb *lcb = NULL; |
| 3764 | struct OPEN_ATTR_ENRTY *oe; |
| 3765 | struct TRANSACTION_ENTRY *tr; |
| 3766 | struct DIR_PAGE_ENTRY *dp; |
| 3767 | u32 i, bytes_per_attr_entry; |
| 3768 | u32 l_size = ni->vfs_inode.i_size; |
| 3769 | u32 orig_file_size = l_size; |
| 3770 | u32 page_size, vbo, tail, off, dlen; |
| 3771 | u32 saved_len, rec_len, transact_id; |
| 3772 | bool use_second_page; |
| 3773 | struct RESTART_AREA *ra2, *ra = NULL; |
| 3774 | struct CLIENT_REC *ca, *cr; |
| 3775 | __le16 client; |
| 3776 | struct RESTART_HDR *rh; |
| 3777 | const struct LFS_RECORD_HDR *frh; |
| 3778 | const struct LOG_REC_HDR *lrh; |
| 3779 | bool is_mapped; |
| 3780 | bool is_ro = sb_rdonly(sbi->sb); |
| 3781 | u64 t64; |
| 3782 | u16 t16; |
| 3783 | u32 t32; |
| 3784 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3785 | /* Get the page size. NOTE: To replay we can use the default page size. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3786 | #if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2 |
| 3787 | page_size = norm_file_page(PAGE_SIZE, &l_size, true); |
| 3788 | #else |
| 3789 | page_size = norm_file_page(PAGE_SIZE, &l_size, false); |
| 3790 | #endif |
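| | |
| | /* |
| |  * The log page size is derived from PAGE_SIZE via norm_file_page() |
| |  * (not shown here), which presumably also trims 'l_size' to a usable |
| |  * multiple of the chosen page size; the third argument appears to |
| |  * select the default 4K log page when PAGE_SIZE allows it. |
| |  */ |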
| 3791 | if (!page_size) |
| 3792 | return -EINVAL; |
| 3793 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3794 | log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3795 | if (!log) |
| 3796 | return -ENOMEM; |
| 3797 | |
| 3798 | log->ni = ni; |
| 3799 | log->l_size = l_size; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3800 | log->one_page_buf = kmalloc(page_size, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3801 | |
| 3802 | if (!log->one_page_buf) { |
| 3803 | err = -ENOMEM; |
| 3804 | goto out; |
| 3805 | } |
| 3806 | |
| 3807 | log->page_size = page_size; |
| 3808 | log->page_mask = page_size - 1; |
| 3809 | log->page_bits = blksize_bits(page_size); |
| 3810 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3811 | /* Look for a restart area on the disk. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3812 | err = log_read_rst(log, l_size, true, &rst_info); |
| 3813 | if (err) |
| 3814 | goto out; |
| 3815 | |
| 3816 | /* Remember 'initialized'. */ |
| 3817 | *initialized = rst_info.initialized; |
| 3818 | |
| 3819 | if (!rst_info.restart) { |
| 3820 | if (rst_info.initialized) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3821 | /* No restart area, but the file is initialized -> corrupt. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3822 | err = -EINVAL; |
| 3823 | goto out; |
| 3824 | } |
| 3825 | |
| 3826 | log_init_pg_hdr(log, page_size, page_size, 1, 1); |
| 3827 | log_create(log, l_size, 0, get_random_int(), false, false); |
| 3828 | |
| 3829 | log->ra = ra; |
| 3830 | |
| 3831 | ra = log_create_ra(log); |
| 3832 | if (!ra) { |
| 3833 | err = -ENOMEM; |
| 3834 | goto out; |
| 3835 | } |
| 3836 | log->ra = ra; |
| 3837 | log->init_ra = true; |
| 3838 | |
| 3839 | goto process_log; |
| 3840 | } |
| 3841 | |
| 3842 | /* |
| 3843 | * If the restart offset above wasn't zero then we won't |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3844 | * look for a second restart. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3845 | */ |
| 3846 | if (rst_info.vbo) |
| 3847 | goto check_restart_area; |
| 3848 | |
| 3849 | err = log_read_rst(log, l_size, false, &rst_info2); |
| 3850 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3851 | /* Determine which restart area to use. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3852 | if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn) |
| 3853 | goto use_first_page; |
| 3854 | |
| 3855 | use_second_page = true; |
| 3856 | |
| 3857 | if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) { |
| 3858 | struct RECORD_PAGE_HDR *sp = NULL; |
| 3859 | bool usa_error; |
| 3860 | |
| 3861 | if (!read_log_page(log, page_size, &sp, &usa_error) && |
| 3862 | sp->rhdr.sign == NTFS_CHKD_SIGNATURE) { |
| 3863 | use_second_page = false; |
| 3864 | } |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3865 | kfree(sp); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3866 | } |
| 3867 | |
| 3868 | if (use_second_page) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3869 | kfree(rst_info.r_page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3870 | memcpy(&rst_info, &rst_info2, sizeof(struct restart_info)); |
| 3871 | rst_info2.r_page = NULL; |
| 3872 | } |
| 3873 | |
| 3874 | use_first_page: |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 3875 | kfree(rst_info2.r_page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3876 | |
| 3877 | check_restart_area: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3878 | /* |
| 3879 | * If the restart area is at offset 0, we want |
| 3880 | * to write the second restart area first. |
| 3881 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3882 | log->init_ra = !!rst_info.vbo; |
| 3883 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3884 | /* If we have a valid page then grab a pointer to the restart area. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3885 | ra2 = rst_info.valid_page |
| 3886 | ? Add2Ptr(rst_info.r_page, |
| 3887 | le16_to_cpu(rst_info.r_page->ra_off)) |
| 3888 | : NULL; |
| 3889 | |
| 3890 | if (rst_info.chkdsk_was_run || |
| 3891 | (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) { |
| 3892 | bool wrapped = false; |
| 3893 | bool use_multi_page = false; |
| 3894 | u32 open_log_count; |
| 3895 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3896 | /* Do some checks based on whether we have a valid log page. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3897 | if (!rst_info.valid_page) { |
| 3898 | open_log_count = get_random_int(); |
| 3899 | goto init_log_instance; |
| 3900 | } |
| 3901 | open_log_count = le32_to_cpu(ra2->open_log_count); |
| 3902 | |
| 3903 | /* |
| 3904 | * If the restart page size isn't changing then we want to |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3905 | * check how much work we need to do. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3906 | */ |
| 3907 | if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size)) |
| 3908 | goto init_log_instance; |
| 3909 | |
| 3910 | init_log_instance: |
| 3911 | log_init_pg_hdr(log, page_size, page_size, 1, 1); |
| 3912 | |
| 3913 | log_create(log, l_size, rst_info.last_lsn, open_log_count, |
| 3914 | wrapped, use_multi_page); |
| 3915 | |
| 3916 | ra = log_create_ra(log); |
| 3917 | if (!ra) { |
| 3918 | err = -ENOMEM; |
| 3919 | goto out; |
| 3920 | } |
| 3921 | log->ra = ra; |
| 3922 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3923 | /* Put the restart areas and initialize |
| 3924 | * the log file as required. |
| 3925 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3926 | goto process_log; |
| 3927 | } |
| 3928 | |
| 3929 | if (!ra2) { |
| 3930 | err = -EINVAL; |
| 3931 | goto out; |
| 3932 | } |
| 3933 | |
| 3934 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3935 | * If the log page or the system page sizes have changed, we can't |
| 3936 | * use the log file. We must use the system page size instead of the |
| 3937 | * default size if there is not a clean shutdown. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3938 | */ |
| 3939 | t32 = le32_to_cpu(rst_info.r_page->sys_page_size); |
| 3940 | if (page_size != t32) { |
| 3941 | l_size = orig_file_size; |
| 3942 | page_size = |
| 3943 | norm_file_page(t32, &l_size, t32 == DefaultLogPageSize); |
| 3944 | } |
| 3945 | |
| 3946 | if (page_size != t32 || |
| 3947 | page_size != le32_to_cpu(rst_info.r_page->page_size)) { |
| 3948 | err = -EINVAL; |
| 3949 | goto out; |
| 3950 | } |
| 3951 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3952 | /* If the file size has shrunk then we won't mount it. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3953 | if (l_size < le64_to_cpu(ra2->l_size)) { |
| 3954 | err = -EINVAL; |
| 3955 | goto out; |
| 3956 | } |
| 3957 | |
| 3958 | log_init_pg_hdr(log, page_size, page_size, |
| 3959 | le16_to_cpu(rst_info.r_page->major_ver), |
| 3960 | le16_to_cpu(rst_info.r_page->minor_ver)); |
| 3961 | |
| 3962 | log->l_size = le64_to_cpu(ra2->l_size); |
| 3963 | log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits); |
| 3964 | log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits; |
| 3965 | log->seq_num_mask = (8 << log->file_data_bits) - 1; |
| 3966 | log->last_lsn = le64_to_cpu(ra2->current_lsn); |
| 3967 | log->seq_num = log->last_lsn >> log->file_data_bits; |
| 3968 | log->ra_off = le16_to_cpu(rst_info.r_page->ra_off); |
| 3969 | log->restart_size = log->sys_page_size - log->ra_off; |
| 3970 | log->record_header_len = le16_to_cpu(ra2->rec_hdr_len); |
| 3971 | log->ra_size = le16_to_cpu(ra2->ra_len); |
| 3972 | log->data_off = le16_to_cpu(ra2->data_off); |
| 3973 | log->data_size = log->page_size - log->data_off; |
| 3974 | log->reserved = log->data_size - log->record_header_len; |
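| | |
| | /* |
| |  * E.g. (typical values, assumed for illustration): with page_size |
| |  * 0x1000, data_off 0x40 and rec_hdr_len 0x30, each log page carries |
| |  * data_size = 0xFC0 bytes of client data, of which reserved = 0xF90 |
| |  * remain for record bodies after the record header. |
| |  */ |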
| 3975 | |
| 3976 | vbo = lsn_to_vbo(log, log->last_lsn); |
| 3977 | |
| 3978 | if (vbo < log->first_page) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3979 | /* This is a pseudo lsn. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3980 | log->l_flags |= NTFSLOG_NO_LAST_LSN; |
| 3981 | log->next_page = log->first_page; |
| 3982 | goto find_oldest; |
| 3983 | } |
| 3984 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3985 | /* Find the end of this log record. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3986 | off = final_log_off(log, log->last_lsn, |
| 3987 | le32_to_cpu(ra2->last_lsn_data_len)); |
| 3988 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3989 | /* If we wrapped the file then increment the sequence number. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3990 | if (off <= vbo) { |
| 3991 | log->seq_num += 1; |
| 3992 | log->l_flags |= NTFSLOG_WRAPPED; |
| 3993 | } |
| 3994 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3995 | /* Now compute the next log page to use. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 3996 | vbo &= ~log->sys_page_mask; |
| 3997 | tail = log->page_size - (off & log->page_mask) - 1; |
| 3998 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 3999 | /* |
| 4000 | * If we can fit another log record on the page, |
| 4001 | * move back a page in the log file. |
| 4002 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4003 | if (tail >= log->record_header_len) { |
| 4004 | log->l_flags |= NTFSLOG_REUSE_TAIL; |
| 4005 | log->next_page = vbo; |
| 4006 | } else { |
| 4007 | log->next_page = next_page_off(log, vbo); |
| 4008 | } |
| 4009 | |
| 4010 | find_oldest: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4011 | /* |
| 4012 | * Find the oldest client lsn. Use the last |
| 4013 | * flushed lsn as a starting point. |
| 4014 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4015 | log->oldest_lsn = log->last_lsn; |
| 4016 | oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)), |
| 4017 | ra2->client_idx[1], &log->oldest_lsn); |
| 4018 | log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn); |
| 4019 | |
| 4020 | if (log->oldest_lsn_off < log->first_page) |
| 4021 | log->l_flags |= NTFSLOG_NO_OLDEST_LSN; |
| 4022 | |
| 4023 | if (!(ra2->flags & RESTART_SINGLE_PAGE_IO)) |
| 4024 | log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO; |
| 4025 | |
| 4026 | log->current_openlog_count = le32_to_cpu(ra2->open_log_count); |
| 4027 | log->total_avail_pages = log->l_size - log->first_page; |
| 4028 | log->total_avail = log->total_avail_pages >> log->page_bits; |
| 4029 | log->max_current_avail = log->total_avail * log->reserved; |
| 4030 | log->total_avail = log->total_avail * log->data_size; |
| 4031 | |
| 4032 | log->current_avail = current_log_avail(log); |
| 4033 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4034 | ra = kzalloc(log->restart_size, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4035 | if (!ra) { |
| 4036 | err = -ENOMEM; |
| 4037 | goto out; |
| 4038 | } |
| 4039 | log->ra = ra; |
| 4040 | |
| 4041 | t16 = le16_to_cpu(ra2->client_off); |
| 4042 | if (t16 == offsetof(struct RESTART_AREA, clients)) { |
| 4043 | memcpy(ra, ra2, log->ra_size); |
| 4044 | } else { |
| 4045 | memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients)); |
| 4046 | memcpy(ra->clients, Add2Ptr(ra2, t16), |
| 4047 | le16_to_cpu(ra2->ra_len) - t16); |
| 4048 | |
| 4049 | log->current_openlog_count = get_random_int(); |
| 4050 | ra->open_log_count = cpu_to_le32(log->current_openlog_count); |
| 4051 | log->ra_size = offsetof(struct RESTART_AREA, clients) + |
| 4052 | sizeof(struct CLIENT_REC); |
| 4053 | ra->client_off = |
| 4054 | cpu_to_le16(offsetof(struct RESTART_AREA, clients)); |
| 4055 | ra->ra_len = cpu_to_le16(log->ra_size); |
| 4056 | } |
| 4057 | |
| 4058 | le32_add_cpu(&ra->open_log_count, 1); |
| 4059 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4060 | /* Now we need to walk through looking for the last lsn. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4061 | err = last_log_lsn(log); |
| 4062 | if (err) |
| 4063 | goto out; |
| 4064 | |
| 4065 | log->current_avail = current_log_avail(log); |
| 4066 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4067 | /* Remember which restart area to write first. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4068 | log->init_ra = rst_info.vbo; |
| 4069 | |
| 4070 | process_log: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4071 | /* Supported log versions: 1.0, 1.1, 2.0 (log->major_ver/minor_ver, short values). */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4072 | switch ((log->major_ver << 16) + log->minor_ver) { |
| 4073 | case 0x10000: |
| 4074 | case 0x10001: |
| 4075 | case 0x20000: |
| 4076 | break; |
| 4077 | default: |
| 4078 | ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported", |
| 4079 | log->major_ver, log->minor_ver); |
| 4080 | err = -EOPNOTSUPP; |
| 4081 | log->set_dirty = true; |
| 4082 | goto out; |
| 4083 | } |
| 4084 | |
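| | /* |
| | * Encoding note: (major_ver << 16) + minor_ver packs the version pair |
| | * into one integer, so 0x10000 is v1.0, 0x10001 is v1.1 and 0x20000 is |
| | * v2.0; anything else is rejected above. |
| | */ |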
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4085 | /* One client "NTFS" per logfile. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4086 | ca = Add2Ptr(ra, le16_to_cpu(ra->client_off)); |
| 4087 | |
| 4088 | for (client = ra->client_idx[1];; client = cr->next_client) { |
| 4089 | if (client == LFS_NO_CLIENT_LE) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4090 | /* Insert the "NTFS" client into the LogFile. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4091 | client = ra->client_idx[0]; |
| 4092 | if (client == LFS_NO_CLIENT_LE) |
| 4093 | return -EINVAL; |
| 4094 | |
| 4095 | t16 = le16_to_cpu(client); |
| 4096 | cr = ca + t16; |
| 4097 | |
| 4098 | remove_client(ca, cr, &ra->client_idx[0]); |
| 4099 | |
| 4100 | cr->restart_lsn = 0; |
| 4101 | cr->oldest_lsn = cpu_to_le64(log->oldest_lsn); |
| 4102 | cr->name_bytes = cpu_to_le32(8); |
| 4103 | cr->name[0] = cpu_to_le16('N'); |
| 4104 | cr->name[1] = cpu_to_le16('T'); |
| 4105 | cr->name[2] = cpu_to_le16('F'); |
| 4106 | cr->name[3] = cpu_to_le16('S'); |
| 4107 | |
| 4108 | add_client(ca, t16, &ra->client_idx[1]); |
| 4109 | break; |
| 4110 | } |
| 4111 | |
| 4112 | cr = ca + le16_to_cpu(client); |
| 4113 | |
| 4114 | if (cpu_to_le32(8) == cr->name_bytes && |
| 4115 | cpu_to_le16('N') == cr->name[0] && |
| 4116 | cpu_to_le16('T') == cr->name[1] && |
| 4117 | cpu_to_le16('F') == cr->name[2] && |
| 4118 | cpu_to_le16('S') == cr->name[3]) |
| 4119 | break; |
| 4120 | } |
| 4121 | |
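| | /* |
| | * Background sketch: the CLIENT_REC array behind 'ca' is threaded into |
| | * two singly linked lists, a free list headed by ra->client_idx[0] and |
| | * an in-use list headed by ra->client_idx[1]. The loop above walks the |
| | * in-use list looking for a client named "NTFS" and, failing that, |
| | * moves a record from the free list to the in-use list and names it |
| | * "NTFS". |
| | */ |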
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4122 | /* Update the client handle with the client block information. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4123 | log->client_id.seq_num = cr->seq_num; |
| 4124 | log->client_id.client_idx = client; |
| 4125 | |
| 4126 | err = read_rst_area(log, &rst, &ra_lsn); |
| 4127 | if (err) |
| 4128 | goto out; |
| 4129 | |
| 4130 | if (!rst) |
| 4131 | goto out; |
| 4132 | |
| 4133 | bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28; |
| 4134 | |
| 4135 | checkpt_lsn = le64_to_cpu(rst->check_point_start); |
| 4136 | if (!checkpt_lsn) |
| 4137 | checkpt_lsn = ra_lsn; |
| 4138 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4139 | /* Allocate and Read the Transaction Table. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4140 | if (!rst->transact_table_len) |
| 4141 | goto check_dirty_page_table; |
| 4142 | |
| 4143 | t64 = le64_to_cpu(rst->transact_table_lsn); |
| 4144 | err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); |
| 4145 | if (err) |
| 4146 | goto out; |
| 4147 | |
| 4148 | lrh = lcb->log_rec; |
| 4149 | frh = lcb->lrh; |
| 4150 | rec_len = le32_to_cpu(frh->client_data_len); |
| 4151 | |
| 4152 | if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), |
| 4153 | bytes_per_attr_entry)) { |
| 4154 | err = -EINVAL; |
| 4155 | goto out; |
| 4156 | } |
| 4157 | |
| 4158 | t16 = le16_to_cpu(lrh->redo_off); |
| 4159 | |
| 4160 | rt = Add2Ptr(lrh, t16); |
| 4161 | t32 = rec_len - t16; |
| 4162 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4163 | /* Now check that this is a valid restart table. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4164 | if (!check_rstbl(rt, t32)) { |
| 4165 | err = -EINVAL; |
| 4166 | goto out; |
| 4167 | } |
| 4168 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4169 | trtbl = kmemdup(rt, t32, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4170 | if (!trtbl) { |
| 4171 | err = -ENOMEM; |
| 4172 | goto out; |
| 4173 | } |
| 4174 | |
| 4175 | lcb_put(lcb); |
| 4176 | lcb = NULL; |
| 4177 | |
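| | /* |
| | * Note: the pattern above repeats for each checkpoint table (the |
| | * Transaction Table here, then the Dirty Page and Open Attribute |
| | * tables below): read the log record at the lsn saved in the restart |
| | * record, validate it with check_log_rec()/check_rstbl(), and take a |
| | * private copy with kmemdup() so the table outlives lcb_put(). |
| | */ |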
| 4178 | check_dirty_page_table: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4179 | /* The next record back should be the Dirty Pages Table. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4180 | if (!rst->dirty_pages_len) |
| 4181 | goto check_attribute_names; |
| 4182 | |
| 4183 | t64 = le64_to_cpu(rst->dirty_pages_table_lsn); |
| 4184 | err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); |
| 4185 | if (err) |
| 4186 | goto out; |
| 4187 | |
| 4188 | lrh = lcb->log_rec; |
| 4189 | frh = lcb->lrh; |
| 4190 | rec_len = le32_to_cpu(frh->client_data_len); |
| 4191 | |
| 4192 | if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), |
| 4193 | bytes_per_attr_entry)) { |
| 4194 | err = -EINVAL; |
| 4195 | goto out; |
| 4196 | } |
| 4197 | |
| 4198 | t16 = le16_to_cpu(lrh->redo_off); |
| 4199 | |
| 4200 | rt = Add2Ptr(lrh, t16); |
| 4201 | t32 = rec_len - t16; |
| 4202 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4203 | /* Now check that this is a valid restart table. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4204 | if (!check_rstbl(rt, t32)) { |
| 4205 | err = -EINVAL; |
| 4206 | goto out; |
| 4207 | } |
| 4208 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4209 | dptbl = kmemdup(rt, t32, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4210 | if (!dptbl) { |
| 4211 | err = -ENOMEM; |
| 4212 | goto out; |
| 4213 | } |
| 4214 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4215 | /* Convert Ra version '0' into version '1'. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4216 | if (rst->major_ver) |
| 4217 | goto end_conv_1; |
| 4218 | |
| 4219 | dp = NULL; |
| 4220 | while ((dp = enum_rstbl(dptbl, dp))) { |
| 4221 | struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp; |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4222 | // NOTE: Danger. Check for out-of-boundary access. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4223 | memmove(&dp->vcn, &dp0->vcn_low, |
| 4224 | 2 * sizeof(u64) + |
| 4225 | le32_to_cpu(dp->lcns_follow) * sizeof(u64)); |
| 4226 | } |
| 4227 | |
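| | /* |
| | * Defensive sketch (an assumption, not the original code): the |
| | * memmove() above trusts the on-disk lcns_follow, so a hardened |
| | * variant could clamp it against the table's entry size first: |
| | * |
| | *	u32 max_lcns = (le16_to_cpu(dptbl->size) - |
| | *			sizeof(struct DIR_PAGE_ENTRY)) / sizeof(u64); |
| | *	if (le32_to_cpu(dp->lcns_follow) > max_lcns) |
| | *		break;	// corrupt entry, stop converting |
| | */ |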
| 4228 | end_conv_1: |
| 4229 | lcb_put(lcb); |
| 4230 | lcb = NULL; |
| 4231 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4232 | /* |
| 4233 | * Go through the table and remove the duplicates, |
| 4234 | * remembering the oldest lsn values. |
| 4235 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4236 | if (sbi->cluster_size <= log->page_size) |
| 4237 | goto trace_dp_table; |
| 4238 | |
| 4239 | dp = NULL; |
| 4240 | while ((dp = enum_rstbl(dptbl, dp))) { |
| 4241 | struct DIR_PAGE_ENTRY *next = dp; |
| 4242 | |
| 4243 | while ((next = enum_rstbl(dptbl, next))) { |
| 4244 | if (next->target_attr == dp->target_attr && |
| 4245 | next->vcn == dp->vcn) { |
| 4246 | if (le64_to_cpu(next->oldest_lsn) < |
| 4247 | le64_to_cpu(dp->oldest_lsn)) { |
| 4248 | dp->oldest_lsn = next->oldest_lsn; |
| 4249 | } |
| 4250 | |
| 4251 | free_rsttbl_idx(dptbl, PtrOffset(dptbl, next)); |
| 4252 | } |
| 4253 | } |
| 4254 | } |
| 4255 | trace_dp_table: |
| 4256 | check_attribute_names: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4257 | /* The next record should be the Attribute Names. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4258 | if (!rst->attr_names_len) |
| 4259 | goto check_attr_table; |
| 4260 | |
| 4261 | t64 = le64_to_cpu(rst->attr_names_lsn); |
| 4262 | err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); |
| 4263 | if (err) |
| 4264 | goto out; |
| 4265 | |
| 4266 | lrh = lcb->log_rec; |
| 4267 | frh = lcb->lrh; |
| 4268 | rec_len = le32_to_cpu(frh->client_data_len); |
| 4269 | |
| 4270 | if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), |
| 4271 | bytes_per_attr_entry)) { |
| 4272 | err = -EINVAL; |
| 4273 | goto out; |
| 4274 | } |
| 4275 | |
| 4276 | t32 = lrh_length(lrh); |
| 4277 | rec_len -= t32; |
| 4278 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4279 | attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS); |
| | if (!attr_names) { |
| | err = -ENOMEM; |
| | goto out; |
| | } |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4280 | |
| 4281 | lcb_put(lcb); |
| 4282 | lcb = NULL; |
| 4283 | |
| 4284 | check_attr_table: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4285 | /* The next record should be the attribute Table. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4286 | if (!rst->open_attr_len) |
| 4287 | goto check_attribute_names2; |
| 4288 | |
| 4289 | t64 = le64_to_cpu(rst->open_attr_table_lsn); |
| 4290 | err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); |
| 4291 | if (err) |
| 4292 | goto out; |
| 4293 | |
| 4294 | lrh = lcb->log_rec; |
| 4295 | frh = lcb->lrh; |
| 4296 | rec_len = le32_to_cpu(frh->client_data_len); |
| 4297 | |
| 4298 | if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), |
| 4299 | bytes_per_attr_entry)) { |
| 4300 | err = -EINVAL; |
| 4301 | goto out; |
| 4302 | } |
| 4303 | |
| 4304 | t16 = le16_to_cpu(lrh->redo_off); |
| 4305 | |
| 4306 | rt = Add2Ptr(lrh, t16); |
| 4307 | t32 = rec_len - t16; |
| 4308 | |
| 4309 | if (!check_rstbl(rt, t32)) { |
| 4310 | err = -EINVAL; |
| 4311 | goto out; |
| 4312 | } |
| 4313 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4314 | oatbl = kmemdup(rt, t32, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4315 | if (!oatbl) { |
| 4316 | err = -ENOMEM; |
| 4317 | goto out; |
| 4318 | } |
| 4319 | |
| 4320 | log->open_attr_tbl = oatbl; |
| 4321 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4322 | /* Clear all of the Attr pointers. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4323 | oe = NULL; |
| 4324 | while ((oe = enum_rstbl(oatbl, oe))) { |
| 4325 | if (!rst->major_ver) { |
| 4326 | struct OPEN_ATTR_ENRTY_32 oe0; |
| 4327 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4328 | /* Really 'oe' points to OPEN_ATTR_ENRTY_32. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4329 | memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0); |
| 4330 | |
| 4331 | oe->bytes_per_index = oe0.bytes_per_index; |
| 4332 | oe->type = oe0.type; |
| 4333 | oe->is_dirty_pages = oe0.is_dirty_pages; |
| 4334 | oe->name_len = 0; |
| 4335 | oe->ref = oe0.ref; |
| 4336 | oe->open_record_lsn = oe0.open_record_lsn; |
| 4337 | } |
| 4338 | |
| 4339 | oe->is_attr_name = 0; |
| 4340 | oe->ptr = NULL; |
| 4341 | } |
| 4342 | |
| 4343 | lcb_put(lcb); |
| 4344 | lcb = NULL; |
| 4345 | |
| 4346 | check_attribute_names2: |
| 4347 | if (!rst->attr_names_len) |
| 4348 | goto trace_attribute_table; |
| 4349 | |
| 4350 | ane = attr_names; |
| 4351 | if (!oatbl) |
| 4352 | goto trace_attribute_table; |
| 4353 | while (ane->off) { |
| 4354 | /* TODO: Clear table on exit! */ |
| 4355 | oe = Add2Ptr(oatbl, le16_to_cpu(ane->off)); |
| 4356 | t16 = le16_to_cpu(ane->name_bytes); |
| 4357 | oe->name_len = t16 / sizeof(short); |
| 4358 | oe->ptr = ane->name; |
| 4359 | oe->is_attr_name = 2; |
| 4360 | ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16); |
| 4361 | } |
| 4362 | |
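| | /* |
| | * Layout note (inferred from the walk above): the attribute names blob |
| | * is a packed sequence of variable-length entries, terminated by an |
| | * entry whose 'off' is zero. Each entry holds 'off' (the byte offset |
| | * of the matching slot in oatbl), 'name_bytes', and the UTF-16LE name |
| | * itself, hence the sizeof(struct ATTR_NAME_ENTRY) + t16 stride. |
| | */ |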
| 4363 | trace_attribute_table: |
| 4364 | /* |
| 4365 | * If the checkpt_lsn is zero, then this is a freshly |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4366 | * formatted disk and we have no work to do. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4367 | */ |
| 4368 | if (!checkpt_lsn) { |
| 4369 | err = 0; |
| 4370 | goto out; |
| 4371 | } |
| 4372 | |
| 4373 | if (!oatbl) { |
| 4374 | oatbl = init_rsttbl(bytes_per_attr_entry, 8); |
| 4375 | if (!oatbl) { |
| 4376 | err = -ENOMEM; |
| 4377 | goto out; |
| 4378 | } |
| 4379 | } |
| 4380 | |
| 4381 | log->open_attr_tbl = oatbl; |
| 4382 | |
| 4383 | /* Start the analysis pass from the Checkpoint lsn. */ |
| 4384 | rec_lsn = checkpt_lsn; |
| 4385 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4386 | /* Read the first lsn. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4387 | err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb); |
| 4388 | if (err) |
| 4389 | goto out; |
| 4390 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4391 | /* Loop to read all subsequent records to the end of the log file. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4392 | next_log_record_analyze: |
| 4393 | err = read_next_log_rec(log, lcb, &rec_lsn); |
| 4394 | if (err) |
| 4395 | goto out; |
| 4396 | |
| 4397 | if (!rec_lsn) |
| 4398 | goto end_log_records_enumerate; |
| 4399 | |
| 4400 | frh = lcb->lrh; |
| 4401 | transact_id = le32_to_cpu(frh->transact_id); |
| 4402 | rec_len = le32_to_cpu(frh->client_data_len); |
| 4403 | lrh = lcb->log_rec; |
| 4404 | |
| 4405 | if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) { |
| 4406 | err = -EINVAL; |
| 4407 | goto out; |
| 4408 | } |
| 4409 | |
| 4410 | /* |
| 4411 | * The first lsn after the previous lsn remembered |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4412 | * the checkpoint is the first candidate for the rlsn. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4413 | */ |
| 4414 | if (!rlsn) |
| 4415 | rlsn = rec_lsn; |
| 4416 | |
| 4417 | if (LfsClientRecord != frh->record_type) |
| 4418 | goto next_log_record_analyze; |
| 4419 | |
| 4420 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4421 | * Now update the Transaction Table for this transaction. If there |
| 4422 | * is no entry present or it is unallocated, we allocate the entry. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4423 | */ |
| 4424 | if (!trtbl) { |
| 4425 | trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY), |
| 4426 | INITIAL_NUMBER_TRANSACTIONS); |
| 4427 | if (!trtbl) { |
| 4428 | err = -ENOMEM; |
| 4429 | goto out; |
| 4430 | } |
| 4431 | } |
| 4432 | |
| 4433 | tr = Add2Ptr(trtbl, transact_id); |
| 4434 | |
| 4435 | if (transact_id >= bytes_per_rt(trtbl) || |
| 4436 | tr->next != RESTART_ENTRY_ALLOCATED_LE) { |
| 4437 | tr = alloc_rsttbl_from_idx(&trtbl, transact_id); |
| 4438 | if (!tr) { |
| 4439 | err = -ENOMEM; |
| 4440 | goto out; |
| 4441 | } |
| 4442 | tr->transact_state = TransactionActive; |
| 4443 | tr->first_lsn = cpu_to_le64(rec_lsn); |
| 4444 | } |
| 4445 | |
| 4446 | tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn); |
| 4447 | |
| 4448 | /* |
| 4449 | * If this is a compensation log record, then change |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4450 | * the undo_next_lsn to be the undo_next_lsn of this record. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4451 | */ |
| 4452 | if (lrh->undo_op == cpu_to_le16(CompensationLogRecord)) |
| 4453 | tr->undo_next_lsn = frh->client_undo_next_lsn; |
| 4454 | |
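| | /* |
| | * Chain sketch: client_undo_next_lsn links one transaction's records |
| | * newest to oldest, and a CompensationLogRecord re-points the chain |
| | * past work that is already undone. E.g. write A (lsn 10), write B |
| | * (lsn 20), CLR undoing B (lsn 30, undo_next = 10): the undo pass |
| | * resumes at A and never undoes B twice. |
| | */ |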
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4455 | /* Dispatch to handle log record depending on type. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4456 | switch (le16_to_cpu(lrh->redo_op)) { |
| 4457 | case InitializeFileRecordSegment: |
| 4458 | case DeallocateFileRecordSegment: |
| 4459 | case WriteEndOfFileRecordSegment: |
| 4460 | case CreateAttribute: |
| 4461 | case DeleteAttribute: |
| 4462 | case UpdateResidentValue: |
| 4463 | case UpdateNonresidentValue: |
| 4464 | case UpdateMappingPairs: |
| 4465 | case SetNewAttributeSizes: |
| 4466 | case AddIndexEntryRoot: |
| 4467 | case DeleteIndexEntryRoot: |
| 4468 | case AddIndexEntryAllocation: |
| 4469 | case DeleteIndexEntryAllocation: |
| 4470 | case WriteEndOfIndexBuffer: |
| 4471 | case SetIndexEntryVcnRoot: |
| 4472 | case SetIndexEntryVcnAllocation: |
| 4473 | case UpdateFileNameRoot: |
| 4474 | case UpdateFileNameAllocation: |
| 4475 | case SetBitsInNonresidentBitMap: |
| 4476 | case ClearBitsInNonresidentBitMap: |
| 4477 | case UpdateRecordDataRoot: |
| 4478 | case UpdateRecordDataAllocation: |
| 4479 | case ZeroEndOfFileRecord: |
| 4480 | t16 = le16_to_cpu(lrh->target_attr); |
| 4481 | t64 = le64_to_cpu(lrh->target_vcn); |
| 4482 | dp = find_dp(dptbl, t16, t64); |
| 4483 | |
| 4484 | if (dp) |
| 4485 | goto copy_lcns; |
| 4486 | |
| 4487 | /* |
| 4488 | * Calculate the number of clusters per page on the system |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4489 | * which wrote the checkpoint, possibly creating the table. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4490 | */ |
| 4491 | if (dptbl) { |
| 4492 | t32 = (le16_to_cpu(dptbl->size) - |
| 4493 | sizeof(struct DIR_PAGE_ENTRY)) / |
| 4494 | sizeof(u64); |
| 4495 | } else { |
| 4496 | t32 = log->clst_per_page; |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4497 | kfree(dptbl); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4498 | dptbl = init_rsttbl(struct_size(dp, page_lcns, t32), |
| 4499 | 32); |
| 4500 | if (!dptbl) { |
| 4501 | err = -ENOMEM; |
| 4502 | goto out; |
| 4503 | } |
| 4504 | } |
| 4505 | |
| 4506 | dp = alloc_rsttbl_idx(&dptbl); |
Dan Carpenter | a1b04d3 | 2021-08-24 14:52:36 +0300 | [diff] [blame] | 4507 | if (!dp) { |
| 4508 | err = -ENOMEM; |
| 4509 | goto out; |
| 4510 | } |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4511 | dp->target_attr = cpu_to_le32(t16); |
| 4512 | dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits); |
| 4513 | dp->lcns_follow = cpu_to_le32(t32); |
| 4514 | dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1)); |
| 4515 | dp->oldest_lsn = cpu_to_le64(rec_lsn); |
| 4516 | |
| 4517 | copy_lcns: |
| 4518 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4519 | * Copy the Lcns from the log record into the Dirty Page Entry. |
| 4520 | * TODO: For different page size support, must somehow make |
| 4521 | * whole routine a loop, in case the Lcns do not fit below. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4522 | */ |
| 4523 | t16 = le16_to_cpu(lrh->lcns_follow); |
| 4524 | for (i = 0; i < t16; i++) { |
| 4525 | size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) - |
| 4526 | le64_to_cpu(dp->vcn)); |
| 4527 | dp->page_lcns[j + i] = lrh->page_lcns[i]; |
| 4528 | } |
| 4529 | |
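| | /* |
| | * Index illustration: dp->vcn was aligned down to a lcns_follow |
| | * boundary when the entry was created, so 'j' is the position of the |
| | * record's first vcn within the entry. E.g. with 8 lcns per entry, |
| | * dp->vcn = 16 and target_vcn = 18: j = 2, and the record's lcns land |
| | * in page_lcns[2..]. |
| | */ |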
| 4530 | goto next_log_record_analyze; |
| 4531 | |
| 4532 | case DeleteDirtyClusters: { |
| 4533 | u32 range_count = |
| 4534 | le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE); |
| 4535 | const struct LCN_RANGE *r = |
| 4536 | Add2Ptr(lrh, le16_to_cpu(lrh->redo_off)); |
| 4537 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4538 | /* Loop through all of the Lcn ranges in this log record. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4539 | for (i = 0; i < range_count; i++, r++) { |
| 4540 | u64 lcn0 = le64_to_cpu(r->lcn); |
| 4541 | u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1; |
| 4542 | |
| 4543 | dp = NULL; |
| 4544 | while ((dp = enum_rstbl(dptbl, dp))) { |
| 4545 | u32 j; |
| 4546 | |
| 4547 | t32 = le32_to_cpu(dp->lcns_follow); |
| 4548 | for (j = 0; j < t32; j++) { |
| 4549 | t64 = le64_to_cpu(dp->page_lcns[j]); |
| 4550 | if (t64 >= lcn0 && t64 <= lcn_e) |
| 4551 | dp->page_lcns[j] = 0; |
| 4552 | } |
| 4553 | } |
| 4554 | } |
| 4555 | goto next_log_record_analyze; |
| 4557 | } |
| 4558 | |
| 4559 | case OpenNonresidentAttribute: |
| 4560 | t16 = le16_to_cpu(lrh->target_attr); |
| 4561 | if (t16 >= bytes_per_rt(oatbl)) { |
| 4562 | /* |
| 4563 | * Compute how big the table needs to be. |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4564 | * Add 10 extra entries for some cushion. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4565 | */ |
| 4566 | u32 new_e = t16 / le16_to_cpu(oatbl->size); |
| 4567 | |
| 4568 | new_e += 10 - le16_to_cpu(oatbl->used); |
| 4569 | |
| 4570 | oatbl = extend_rsttbl(oatbl, new_e, ~0u); |
| 4571 | log->open_attr_tbl = oatbl; |
| 4572 | if (!oatbl) { |
| 4573 | err = -ENOMEM; |
| 4574 | goto out; |
| 4575 | } |
| 4576 | } |
| 4577 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4578 | /* Point to the entry being opened. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4579 | oe = alloc_rsttbl_from_idx(&oatbl, t16); |
| 4580 | log->open_attr_tbl = oatbl; |
| 4581 | if (!oe) { |
| 4582 | err = -ENOMEM; |
| 4583 | goto out; |
| 4584 | } |
| 4585 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4586 | /* Initialize this entry from the log record. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4587 | t16 = le16_to_cpu(lrh->redo_off); |
| 4588 | if (!rst->major_ver) { |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4589 | /* Convert version '0' into version '1'. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4590 | struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16); |
| 4591 | |
| 4592 | oe->bytes_per_index = oe0->bytes_per_index; |
| 4593 | oe->type = oe0->type; |
| 4594 | oe->is_dirty_pages = oe0->is_dirty_pages; |
| 4595 | oe->name_len = 0; //oe0.name_len; |
| 4596 | oe->ref = oe0->ref; |
| 4597 | oe->open_record_lsn = oe0->open_record_lsn; |
| 4598 | } else { |
| 4599 | memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry); |
| 4600 | } |
| 4601 | |
| 4602 | t16 = le16_to_cpu(lrh->undo_len); |
| 4603 | if (t16) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4604 | oe->ptr = kmalloc(t16, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4605 | if (!oe->ptr) { |
| 4606 | err = -ENOMEM; |
| 4607 | goto out; |
| 4608 | } |
| 4609 | oe->name_len = t16 / sizeof(short); |
| 4610 | memcpy(oe->ptr, |
| 4611 | Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16); |
| 4612 | oe->is_attr_name = 1; |
| 4613 | } else { |
| 4614 | oe->ptr = NULL; |
| 4615 | oe->is_attr_name = 0; |
| 4616 | } |
| 4617 | |
| 4618 | goto next_log_record_analyze; |
| 4619 | |
| 4620 | case HotFix: |
| 4621 | t16 = le16_to_cpu(lrh->target_attr); |
| 4622 | t64 = le64_to_cpu(lrh->target_vcn); |
| 4623 | dp = find_dp(dptbl, t16, t64); |
| 4624 | if (dp) { |
| 4625 | size_t j = le64_to_cpu(lrh->target_vcn) - |
| 4626 | le64_to_cpu(dp->vcn); |
| 4627 | if (dp->page_lcns[j]) |
| 4628 | dp->page_lcns[j] = lrh->page_lcns[0]; |
| 4629 | } |
| 4630 | goto next_log_record_analyze; |
| 4631 | |
| 4632 | case EndTopLevelAction: |
| 4633 | tr = Add2Ptr(trtbl, transact_id); |
| 4634 | tr->prev_lsn = cpu_to_le64(rec_lsn); |
| 4635 | tr->undo_next_lsn = frh->client_undo_next_lsn; |
| 4636 | goto next_log_record_analyze; |
| 4637 | |
| 4638 | case PrepareTransaction: |
| 4639 | tr = Add2Ptr(trtbl, transact_id); |
| 4640 | tr->transact_state = TransactionPrepared; |
| 4641 | goto next_log_record_analyze; |
| 4642 | |
| 4643 | case CommitTransaction: |
| 4644 | tr = Add2Ptr(trtbl, transact_id); |
| 4645 | tr->transact_state = TransactionCommitted; |
| 4646 | goto next_log_record_analyze; |
| 4647 | |
| 4648 | case ForgetTransaction: |
| 4649 | free_rsttbl_idx(trtbl, transact_id); |
| 4650 | goto next_log_record_analyze; |
| 4651 | |
| 4652 | case Noop: |
| 4653 | case OpenAttributeTableDump: |
| 4654 | case AttributeNamesDump: |
| 4655 | case DirtyPageTableDump: |
| 4656 | case TransactionTableDump: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4657 | /* The following cases require no action in the Analysis Pass. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4658 | goto next_log_record_analyze; |
| 4659 | |
| 4660 | default: |
| 4661 | /* |
| 4662 | * All codes will be explicitly handled. |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4663 | * If we see a code we do not expect, then we are in trouble. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4664 | */ |
| 4665 | goto next_log_record_analyze; |
| 4666 | } |
| 4667 | |
| 4668 | end_log_records_enumerate: |
| 4669 | lcb_put(lcb); |
| 4670 | lcb = NULL; |
| 4671 | |
| 4672 | /* |
| 4673 | * Scan the Dirty Page Table and Transaction Table for |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4674 | * the lowest lsn, and return it as the Redo lsn. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4675 | */ |
| 4676 | dp = NULL; |
| 4677 | while ((dp = enum_rstbl(dptbl, dp))) { |
| 4678 | t64 = le64_to_cpu(dp->oldest_lsn); |
| 4679 | if (t64 && t64 < rlsn) |
| 4680 | rlsn = t64; |
| 4681 | } |
| 4682 | |
| 4683 | tr = NULL; |
| 4684 | while ((tr = enum_rstbl(trtbl, tr))) { |
| 4685 | t64 = le64_to_cpu(tr->first_lsn); |
| 4686 | if (t64 && t64 < rlsn) |
| 4687 | rlsn = t64; |
| 4688 | } |
| 4689 | |
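| | /* |
| | * In effect rlsn = min(first lsn past the checkpoint, the oldest |
| | * dp->oldest_lsn, the oldest tr->first_lsn), i.e. the Redo Pass must |
| | * start at the oldest update that may not have reached disk. |
| | */ |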
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4690 | /* |
| 4691 | * Only proceed if the Dirty Page Table or Transaction |
| 4692 | * table are not empty. |
| 4693 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4694 | if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total)) |
| 4695 | goto end_reply; |
| 4696 | |
| 4697 | sbi->flags |= NTFS_FLAGS_NEED_REPLAY; |
| 4698 | if (is_ro) |
| 4699 | goto out; |
| 4700 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4701 | /* Reopen all of the attributes with dirty pages. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4702 | oe = NULL; |
| 4703 | next_open_attribute: |
| 4704 | |
| 4705 | oe = enum_rstbl(oatbl, oe); |
| 4706 | if (!oe) { |
| 4707 | err = 0; |
| 4708 | dp = NULL; |
| 4709 | goto next_dirty_page; |
| 4710 | } |
| 4711 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4712 | oa = kzalloc(sizeof(struct OpenAttr), GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4713 | if (!oa) { |
| 4714 | err = -ENOMEM; |
| 4715 | goto out; |
| 4716 | } |
| 4717 | |
| 4718 | inode = ntfs_iget5(sbi->sb, &oe->ref, NULL); |
| 4719 | if (IS_ERR(inode)) |
| 4720 | goto fake_attr; |
| 4721 | |
| 4722 | if (is_bad_inode(inode)) { |
| 4723 | iput(inode); |
| 4724 | fake_attr: |
| 4725 | if (oa->ni) { |
| 4726 | iput(&oa->ni->vfs_inode); |
| 4727 | oa->ni = NULL; |
| 4728 | } |
| 4729 | |
| 4730 | attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr, |
| 4731 | oe->name_len, 0); |
| 4732 | if (!attr) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4733 | kfree(oa); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4734 | err = -ENOMEM; |
| 4735 | goto out; |
| 4736 | } |
| 4737 | oa->attr = attr; |
| 4738 | oa->run1 = &oa->run0; |
| 4739 | goto final_oe; |
| 4740 | } |
| 4741 | |
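| | /* |
| | * Note on the fallback above: when the inode cannot be loaded, replay |
| | * still needs a target for this table slot, so a detached non-resident |
| | * attribute with its own private run list stands in for the real one, |
| | * and later actions still have an attribute and run to apply against. |
| | */ |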
| 4742 | ni_oe = ntfs_i(inode); |
| 4743 | oa->ni = ni_oe; |
| 4744 | |
| 4745 | attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len, |
| 4746 | NULL, NULL); |
| 4747 | |
| 4748 | if (!attr) |
| 4749 | goto fake_attr; |
| 4750 | |
| 4751 | t32 = le32_to_cpu(attr->size); |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4752 | oa->attr = kmemdup(attr, t32, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4753 | if (!oa->attr) |
| 4754 | goto fake_attr; |
| 4755 | |
| 4756 | if (!S_ISDIR(inode->i_mode)) { |
| 4757 | if (attr->type == ATTR_DATA && !attr->name_len) { |
| 4758 | oa->run1 = &ni_oe->file.run; |
| 4759 | goto final_oe; |
| 4760 | } |
| 4761 | } else { |
| 4762 | if (attr->type == ATTR_ALLOC && |
| 4763 | attr->name_len == ARRAY_SIZE(I30_NAME) && |
| 4764 | !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) { |
| 4765 | oa->run1 = &ni_oe->dir.alloc_run; |
| 4766 | goto final_oe; |
| 4767 | } |
| 4768 | } |
| 4769 | |
| 4770 | if (attr->non_res) { |
| 4771 | u16 roff = le16_to_cpu(attr->nres.run_off); |
| 4772 | CLST svcn = le64_to_cpu(attr->nres.svcn); |
| 4773 | |
| 4774 | err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn, |
| 4775 | le64_to_cpu(attr->nres.evcn), svcn, |
| 4776 | Add2Ptr(attr, roff), t32 - roff); |
| 4777 | if (err < 0) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4778 | kfree(oa->attr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4779 | oa->attr = NULL; |
| 4780 | goto fake_attr; |
| 4781 | } |
| 4782 | err = 0; |
| 4783 | } |
| 4784 | oa->run1 = &oa->run0; |
| 4785 | attr = oa->attr; |
| 4786 | |
| 4787 | final_oe: |
| 4788 | if (oe->is_attr_name == 1) |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 4789 | kfree(oe->ptr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4790 | oe->is_attr_name = 0; |
| 4791 | oe->ptr = oa; |
| 4792 | oe->name_len = attr->name_len; |
| 4793 | |
| 4794 | goto next_open_attribute; |
| 4795 | |
| 4796 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4797 | * Now loop through the dirty page table to extract all of the Vcn/Lcn |
| 4798 | * mappings that we have, and insert them into the appropriate run. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4799 | */ |
| 4800 | next_dirty_page: |
| 4801 | dp = enum_rstbl(dptbl, dp); |
| 4802 | if (!dp) |
| 4803 | goto do_redo_1; |
| 4804 | |
| 4805 | oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr)); |
| 4806 | |
| 4807 | if (oe->next != RESTART_ENTRY_ALLOCATED_LE) |
| 4808 | goto next_dirty_page; |
| 4809 | |
| 4810 | oa = oe->ptr; |
| 4811 | if (!oa) |
| 4812 | goto next_dirty_page; |
| 4813 | |
| 4814 | i = -1; |
| 4815 | next_dirty_page_vcn: |
| 4816 | i += 1; |
| 4817 | if (i >= le32_to_cpu(dp->lcns_follow)) |
| 4818 | goto next_dirty_page; |
| 4819 | |
| 4820 | vcn = le64_to_cpu(dp->vcn) + i; |
| 4821 | size = (vcn + 1) << sbi->cluster_bits; |
| 4822 | |
| 4823 | if (!dp->page_lcns[i]) |
| 4824 | goto next_dirty_page_vcn; |
| 4825 | |
| 4826 | rno = ino_get(&oe->ref); |
| 4827 | if (rno <= MFT_REC_MIRR && |
| 4828 | size < (MFT_REC_VOL + 1) * sbi->record_size && |
| 4829 | oe->type == ATTR_DATA) { |
| 4830 | goto next_dirty_page_vcn; |
| 4831 | } |
| 4832 | |
| 4833 | lcn = le64_to_cpu(dp->page_lcns[i]); |
| 4834 | |
| 4835 | if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) || |
| 4836 | lcn0 != lcn) && |
| 4837 | !run_add_entry(oa->run1, vcn, lcn, 1, false)) { |
| 4838 | err = -ENOMEM; |
| 4839 | goto out; |
| 4840 | } |
| 4841 | attr = oa->attr; |
| 4842 | t64 = le64_to_cpu(attr->nres.alloc_size); |
| 4843 | if (size > t64) { |
| 4844 | attr->nres.valid_size = attr->nres.data_size = |
| 4845 | attr->nres.alloc_size = cpu_to_le64(size); |
| 4846 | } |
| 4847 | goto next_dirty_page_vcn; |
| 4848 | |
| 4849 | do_redo_1: |
| 4850 | /* |
| 4851 | * Perform the Redo Pass, to restore all of the dirty pages to the same |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4852 | * contents that they had immediately before the crash. If the dirty |
| 4853 | * page table is empty, then we can skip the entire Redo Pass. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4854 | */ |
| 4855 | if (!dptbl || !dptbl->total) |
| 4856 | goto do_undo_action; |
| 4857 | |
| 4858 | rec_lsn = rlsn; |
| 4859 | |
| 4860 | /* |
| 4861 | * Read the record at the Redo lsn, before falling |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4862 | * into common code to handle each record. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4863 | */ |
| 4864 | err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb); |
| 4865 | if (err) |
| 4866 | goto out; |
| 4867 | |
| 4868 | /* |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4869 | * Now loop to read all of our log records forwards, until |
| 4870 | * we hit the end of the file, cleaning up at the end. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4871 | */ |
| 4872 | do_action_next: |
| 4873 | frh = lcb->lrh; |
| 4874 | |
| 4875 | if (LfsClientRecord != frh->record_type) |
| 4876 | goto read_next_log_do_action; |
| 4877 | |
| 4878 | transact_id = le32_to_cpu(frh->transact_id); |
| 4879 | rec_len = le32_to_cpu(frh->client_data_len); |
| 4880 | lrh = lcb->log_rec; |
| 4881 | |
| 4882 | if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) { |
| 4883 | err = -EINVAL; |
| 4884 | goto out; |
| 4885 | } |
| 4886 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4887 | /* Ignore log records that do not update pages. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4888 | if (lrh->lcns_follow) |
| 4889 | goto find_dirty_page; |
| 4890 | |
| 4891 | goto read_next_log_do_action; |
| 4892 | |
| 4893 | find_dirty_page: |
| 4894 | t16 = le16_to_cpu(lrh->target_attr); |
| 4895 | t64 = le64_to_cpu(lrh->target_vcn); |
| 4896 | dp = find_dp(dptbl, t16, t64); |
| 4897 | |
| 4898 | if (!dp) |
| 4899 | goto read_next_log_do_action; |
| 4900 | |
| 4901 | if (rec_lsn < le64_to_cpu(dp->oldest_lsn)) |
| 4902 | goto read_next_log_do_action; |
| 4903 | |
| 4904 | t16 = le16_to_cpu(lrh->target_attr); |
| 4905 | if (t16 >= bytes_per_rt(oatbl)) { |
| 4906 | err = -EINVAL; |
| 4907 | goto out; |
| 4908 | } |
| 4909 | |
| 4910 | oe = Add2Ptr(oatbl, t16); |
| 4911 | |
| 4912 | if (oe->next != RESTART_ENTRY_ALLOCATED_LE) { |
| 4913 | err = -EINVAL; |
| 4914 | goto out; |
| 4915 | } |
| 4916 | |
| 4917 | oa = oe->ptr; |
| 4918 | |
| 4919 | if (!oa) { |
| 4920 | err = -EINVAL; |
| 4921 | goto out; |
| 4922 | } |
| 4923 | attr = oa->attr; |
| 4924 | |
| 4925 | vcn = le64_to_cpu(lrh->target_vcn); |
| 4926 | |
| 4927 | if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) || |
| 4928 | lcn == SPARSE_LCN) { |
| 4929 | goto read_next_log_do_action; |
| 4930 | } |
| 4931 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4932 | /* Point to the Redo data and get its length. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4933 | data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off)); |
| 4934 | dlen = le16_to_cpu(lrh->redo_len); |
| 4935 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4936 | /* Shorten length by any Lcns which were deleted. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4937 | saved_len = dlen; |
| 4938 | |
| 4939 | for (i = le16_to_cpu(lrh->lcns_follow); i; i--) { |
| 4940 | size_t j; |
| 4941 | u32 alen, voff; |
| 4942 | |
| 4943 | voff = le16_to_cpu(lrh->record_off) + |
| 4944 | le16_to_cpu(lrh->attr_off); |
| 4945 | voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT; |
| 4946 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4947 | /* If the Vcn in question is allocated, we can just get out. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4948 | j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn); |
| 4949 | if (dp->page_lcns[j + i - 1]) |
| 4950 | break; |
| 4951 | |
| 4952 | if (!saved_len) |
| 4953 | saved_len = 1; |
| 4954 | |
| 4955 | /* |
| 4956 | * Calculate the allocated space left relative to the |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4957 | * log record Vcn, after removing this unallocated Vcn. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4958 | */ |
| 4959 | alen = (i - 1) << sbi->cluster_bits; |
| 4960 | |
| 4961 | /* |
| 4962 | * If the update described by this log record goes beyond |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4963 | * the allocated space, then we will have to reduce the length. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4964 | */ |
| 4965 | if (voff >= alen) |
| 4966 | dlen = 0; |
| 4967 | else if (voff + dlen > alen) |
| 4968 | dlen = alen - voff; |
| 4969 | } |
| 4970 | |
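| | /* |
| | * Worked example (illustrative numbers): 4 KiB clusters, the record |
| | * covers 4 clusters, and page_lcns[j+2] / page_lcns[j+3] were zeroed |
| | * by DeleteDirtyClusters. The loop trims against alen = 2 << 12 = |
| | * 0x2000 before breaking at the still-allocated page_lcns[j+1]; with |
| | * voff = 0x1f00 and dlen = 0x300 this leaves dlen = 0x100, and dlen |
| | * would drop to 0 (record skipped) had voff been >= 0x2000. |
| | */ |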
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4971 | /* |
| 4972 | * If the resulting dlen from above is now zero, |
| 4973 | * we can skip this log record. |
| 4974 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4975 | if (!dlen && saved_len) |
| 4976 | goto read_next_log_do_action; |
| 4977 | |
| 4978 | t16 = le16_to_cpu(lrh->redo_op); |
| 4979 | if (can_skip_action(t16)) |
| 4980 | goto read_next_log_do_action; |
| 4981 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4982 | /* Apply the Redo operation in a common routine. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4983 | err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn); |
| 4984 | if (err) |
| 4985 | goto out; |
| 4986 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4987 | /* Keep reading and looping back until end of file. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4988 | read_next_log_do_action: |
| 4989 | err = read_next_log_rec(log, lcb, &rec_lsn); |
| 4990 | if (!err && rec_lsn) |
| 4991 | goto do_action_next; |
| 4992 | |
| 4993 | lcb_put(lcb); |
| 4994 | lcb = NULL; |
| 4995 | |
| 4996 | do_undo_action: |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 4997 | /* Scan Transaction Table. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 4998 | tr = NULL; |
| 4999 | transaction_table_next: |
| 5000 | tr = enum_rstbl(trtbl, tr); |
| 5001 | if (!tr) |
| 5002 | goto undo_action_done; |
| 5003 | |
| 5004 | if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) { |
| 5005 | free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr)); |
| 5006 | goto transaction_table_next; |
| 5007 | } |
| 5008 | |
| 5009 | log->transaction_id = PtrOffset(trtbl, tr); |
| 5010 | undo_next_lsn = le64_to_cpu(tr->undo_next_lsn); |
| 5011 | |
| 5012 | /* |
| 5013 | * We only have to do anything if the transaction has |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5014 | * something in its undo_next_lsn field. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5015 | */ |
| 5016 | if (!undo_next_lsn) |
| 5017 | goto commit_undo; |
| 5018 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5019 | /* Read the first record to be undone by this transaction. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5020 | err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb); |
| 5021 | if (err) |
| 5022 | goto out; |
| 5023 | |
| 5024 | /* |
| 5025 | * Now loop to read all of our log records forwards, |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5026 | * until we hit the end of the file, cleaning up at the end. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5027 | */ |
| 5028 | undo_action_next: |
| 5029 | |
| 5030 | lrh = lcb->log_rec; |
| 5031 | frh = lcb->lrh; |
| 5032 | transact_id = le32_to_cpu(frh->transact_id); |
| 5033 | rec_len = le32_to_cpu(frh->client_data_len); |
| 5034 | |
| 5035 | if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) { |
| 5036 | err = -EINVAL; |
| 5037 | goto out; |
| 5038 | } |
| 5039 | |
| 5040 | if (lrh->undo_op == cpu_to_le16(Noop)) |
| 5041 | goto read_next_log_undo_action; |
| 5042 | |
| 5043 | oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr)); |
| 5044 | oa = oe->ptr; |
| 5045 | |
| 5046 | t16 = le16_to_cpu(lrh->lcns_follow); |
| 5047 | if (!t16) |
| 5048 | goto add_allocated_vcns; |
| 5049 | |
| 5050 | is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn), |
| 5051 | &lcn, &clen, NULL); |
| 5052 | |
| 5053 | /* |
| 5054 | * If the mapping isn't already in the table, or the mapping |
| 5055 | * corresponds to a hole, we need to make sure there is no |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5056 | * partial page already in memory. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5057 | */ |
| 5058 | if (is_mapped && lcn != SPARSE_LCN && clen >= t16) |
| 5059 | goto add_allocated_vcns; |
| 5060 | |
| 5061 | vcn = le64_to_cpu(lrh->target_vcn); |
| 5062 | vcn &= ~(log->clst_per_page - 1); |
| 5063 | |
| 5064 | add_allocated_vcns: |
| 5065 | for (i = 0, vcn = le64_to_cpu(lrh->target_vcn), |
| 5066 | size = (vcn + 1) << sbi->cluster_bits; |
| 5067 | i < t16; i++, vcn += 1, size += sbi->cluster_size) { |
| 5068 | attr = oa->attr; |
| 5069 | if (!attr->non_res) { |
| 5070 | if (size > le32_to_cpu(attr->res.data_size)) |
| 5071 | attr->res.data_size = cpu_to_le32(size); |
| 5072 | } else { |
| 5073 | if (size > le64_to_cpu(attr->nres.data_size)) |
| 5074 | attr->nres.valid_size = attr->nres.data_size = |
| 5075 | attr->nres.alloc_size = |
| 5076 | cpu_to_le64(size); |
| 5077 | } |
| 5078 | } |
| 5079 | |
| 5080 | t16 = le16_to_cpu(lrh->undo_op); |
| 5081 | if (can_skip_action(t16)) |
| 5082 | goto read_next_log_undo_action; |
| 5083 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5084 | /* Point to the Undo data and get its length. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5085 | data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)); |
| 5086 | dlen = le16_to_cpu(lrh->undo_len); |
| 5087 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5088 | /* It is time to apply the undo action. */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5089 | err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL); |
| 5090 | |
| 5091 | read_next_log_undo_action: |
| 5092 | /* |
| 5093 | * Keep reading and looping back until we have read the |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5094 | * last record for this transaction. |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5095 | */ |
| 5096 | err = read_next_log_rec(log, lcb, &rec_lsn); |
| 5097 | if (err) |
| 5098 | goto out; |
| 5099 | |
| 5100 | if (rec_lsn) |
| 5101 | goto undo_action_next; |
| 5102 | |
| 5103 | lcb_put(lcb); |
| 5104 | lcb = NULL; |
| 5105 | |
| 5106 | commit_undo: |
| 5107 | free_rsttbl_idx(trtbl, log->transaction_id); |
| 5108 | |
| 5109 | log->transaction_id = 0; |
| 5110 | |
| 5111 | goto transaction_table_next; |
| 5112 | |
| 5113 | undo_action_done: |
| 5114 | |
| 5115 | ntfs_update_mftmirr(sbi, 0); |
| 5116 | |
| 5117 | sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY; |
| 5118 | |
| 5119 | end_reply: |
| 5120 | |
| 5121 | err = 0; |
| 5122 | if (is_ro) |
| 5123 | goto out; |
| 5124 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5125 | rh = kzalloc(log->page_size, GFP_NOFS); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5126 | if (!rh) { |
| 5127 | err = -ENOMEM; |
| 5128 | goto out; |
| 5129 | } |
| 5130 | |
| 5131 | rh->rhdr.sign = NTFS_RSTR_SIGNATURE; |
| 5132 | rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups)); |
| 5133 | t16 = (log->page_size >> SECTOR_SHIFT) + 1; |
| 5134 | rh->rhdr.fix_num = cpu_to_le16(t16); |
| 5135 | rh->sys_page_size = cpu_to_le32(log->page_size); |
| 5136 | rh->page_size = cpu_to_le32(log->page_size); |
| 5137 | |
Konstantin Komarov | d362446 | 2021-08-31 16:57:40 +0300 | [diff] [blame] | 5138 | t16 = ALIGN(offsetof(struct RESTART_HDR, fixups) + sizeof(short) * t16, |
| 5139 | 8); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5140 | rh->ra_off = cpu_to_le16(t16); |
| 5141 | rh->minor_ver = cpu_to_le16(1); // 0x1A: |
| 5142 | rh->major_ver = cpu_to_le16(1); // 0x1C: |
| 5143 | |
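| | /* |
| | * Fixup arithmetic (worked example): fix_num is one __le16 per |
| | * 512-byte sector plus one slot for the saved value, so a 4 KiB |
| | * restart page gets fix_num = 8 + 1 = 9, and the restart area then |
| | * starts at ra_off = ALIGN(0x1e + 9 * 2, 8) = 0x30. |
| | */ |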
| 5144 | ra2 = Add2Ptr(rh, t16); |
| 5145 | memcpy(ra2, ra, sizeof(struct RESTART_AREA)); |
| 5146 | |
| 5147 | ra2->client_idx[0] = 0; |
| 5148 | ra2->client_idx[1] = LFS_NO_CLIENT_LE; |
| 5149 | ra2->flags = cpu_to_le16(2); |
| 5150 | |
| 5151 | le32_add_cpu(&ra2->open_log_count, 1); |
| 5152 | |
| 5153 | ntfs_fix_pre_write(&rh->rhdr, log->page_size); |
| 5154 | |
| 5155 | err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size); |
| 5156 | if (!err) |
| 5157 | err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size, |
| 5158 | rh, log->page_size); |
| 5159 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5160 | kfree(rh); |
| 5163 | |
| 5164 | out: |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5165 | kfree(rst); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5166 | if (lcb) |
| 5167 | lcb_put(lcb); |
| 5168 | |
Kari Argillander | e8b8e97 | 2021-08-03 14:57:09 +0300 | [diff] [blame] | 5169 | /* |
| 5170 | * Scan the Open Attribute Table to close all of |
| 5171 | * the open attributes. |
| 5172 | */ |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5173 | oe = NULL; |
| 5174 | while ((oe = enum_rstbl(oatbl, oe))) { |
| 5175 | rno = ino_get(&oe->ref); |
| 5176 | |
| 5177 | if (oe->is_attr_name == 1) { |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5178 | kfree(oe->ptr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5179 | oe->ptr = NULL; |
| 5180 | continue; |
| 5181 | } |
| 5182 | |
| 5183 | if (oe->is_attr_name) |
| 5184 | continue; |
| 5185 | |
| 5186 | oa = oe->ptr; |
| 5187 | if (!oa) |
| 5188 | continue; |
| 5189 | |
| 5190 | run_close(&oa->run0); |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5191 | kfree(oa->attr); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5192 | if (oa->ni) |
| 5193 | iput(&oa->ni->vfs_inode); |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5194 | kfree(oa); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5195 | } |
| 5196 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5197 | kfree(trtbl); |
| 5198 | kfree(oatbl); |
| 5199 | kfree(dptbl); |
| 5200 | kfree(attr_names); |
| 5201 | kfree(rst_info.r_page); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5202 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5203 | kfree(ra); |
| 5204 | kfree(log->one_page_buf); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5205 | |
| 5206 | if (err) |
| 5207 | sbi->flags |= NTFS_FLAGS_NEED_REPLAY; |
| 5208 | |
| 5209 | if (err == -EROFS) |
| 5210 | err = 0; |
| 5211 | else if (log->set_dirty) |
| 5212 | ntfs_set_state(sbi, NTFS_DIRTY_ERROR); |
| 5213 | |
Kari Argillander | 195c52b | 2021-08-24 21:37:07 +0300 | [diff] [blame] | 5214 | kfree(log); |
Konstantin Komarov | b46acd6 | 2021-08-13 17:21:30 +0300 | [diff] [blame] | 5215 | |
| 5216 | return err; |
| 5217 | } |