// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
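
/*
 * For illustration, this is roughly how recovery consumes the packed
 * chunk described above (a sketch, assuming 'dp' points at the packed
 * four-byte copy; the real callers live in the log recovery code):
 *
 *	uint clientid = xlog_get_client_id(*(__be32 *)dp);
 */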

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }
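
/*
 * These string tables exist to pretty-print states in tracepoints; a
 * sketch of the intended use (the actual trace events are defined
 * elsewhere, so treat this as illustrative):
 *
 *	__print_symbolic(__entry->state, XLOG_STATE_STRINGS)
 */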

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1 << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1 << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }

/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	0x1	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity after an allocation
 * transaction and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 * IDLE -- no logging has been done on the file system or
 *	we are done covering previous transactions.
 * NEED -- logging has occurred and we need a dummy transaction
 *	when the log becomes idle.
 * DONE -- we were in the NEED state and have committed a dummy
 *	transaction.
 * NEED2 -- we detected that a dummy transaction has gone to the
 *	on disk log with no other transactions.
 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *	dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *	dummy transaction, we move to IDLE.
 *
 * Writing only one dummy transaction can get appended to
 * one file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 * A sketch of rule 2.) follows the state definitions below.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
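
/*
 * Illustrative sketch of rule 2.) above: the covering state we would move
 * to when an on-disk log write completes. This is not the kernel
 * implementation (that logic lives in xfs_log.c); the function name is
 * made up for this example.
 */
static inline int
xlog_covered_next_state_example(
	int	cur_state,
	bool	was_dummy_record)
{
	/* A non-dummy record going out always needs covering again. */
	if (!was_dummy_record)
		return XLOG_STATE_COVER_NEED;

	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:
		/* first dummy made it to disk with nothing else */
		return XLOG_STATE_COVER_NEED2;
	case XLOG_STATE_COVER_DONE2:
		/* second dummy is on disk; the log is fully covered */
		return XLOG_STATE_COVER_IDLE;
	default:
		/* a dummy record can only complete from DONE or DONE2 */
		return XLOG_STATE_COVER_NEED;
	}
}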

/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier	: 4 */
	atomic_t		t_ref;		/* ticket reference count	: 4 */
	int			t_curr_res;	/* current reservation in bytes : 4 */
	int			t_unit_res;	/* unit reservation in bytes	: 4 */
	char			t_ocnt;		/* original count		: 1 */
	char			t_cnt;		/* current count		: 1 */
	char			t_clientid;	/* who does this belong to;	: 1 */
	char			t_flags;	/* properties of reservation	: 1 */

	/* reservation array fields */
	uint			t_res_num;	/* num in array			: 4 */
	uint			t_res_num_ophdrs; /* num op hdrs		: 4 */
	uint			t_res_arr_sum;	/* array sum			: 4 */
	uint			t_res_o_flow;	/* sum overflow			: 4 */
	xlog_res_t		t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : 8 * 15 */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes. There is plenty of room to grow the
 *   xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *   the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *   disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	char			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as to
 * be passed to the iclog for checkpoint post-commit processing. After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint. A sketch of
 * that handoff follows the structure definition below.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
	struct work_struct	push_work;
};
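
/*
 * Illustrative sketch of the context handoff mentioned above (the real code
 * is the CIL push work in xfs_log_cil.c and does considerably more; treat
 * the details below as an example only):
 *
 *	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
 *	down_write(&cil->xc_ctx_lock);
 *	ctx = cil->xc_ctx;			// context being checkpointed
 *	new_ctx->sequence = ctx->sequence + 1;
 *	new_ctx->cil = cil;
 *	cil->xc_ctx = new_ctx;			// new commits aggregate here
 *	cil->xc_current_sequence = new_ctx->sequence;
 *	up_write(&cil->xc_ctx_lock);
 *	// 'ctx' is now frozen and can be written into the iclogs
 */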

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_force_seq() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */
} ____cacheline_aligned_in_smp;

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but not so large that it
 * overwhelms the log or induces too much latency when writing out through
 * the iclogs. We track both space consumed and the number of vectors in the
 * checkpoint context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers. The number of headers will vary based on the number
 * of io vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to
 * track and limit space consumed in the log rather than by the number of
 * objects being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that. Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the CIL
 * to grow to a substantial fraction of the log, then we may be pinning hundreds
 * of megabytes of metadata in memory until the CIL flushes. This can cause
 * issues when we are running low on memory - pinned memory cannot be reclaimed,
 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
 * size limit for the CIL that limits the maximum amount of memory pinned by the
 * CIL but does not limit performance by reducing relogging efficiency
 * significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows CIL to be pushed and progress to be
 *   made without excessive blocking of incoming transaction commits. This is
 *   defined to be 12.5% of the log space - half the 25% push threshold of the
 *   AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block and
 * yield the CPU, giving the CIL push work a chance to be scheduled and start
 * work. This prevents a process running lots of transactions from overfilling
 * the CIL because it is not yielding the CPU. We set the blocking limit at
 * twice the background push space threshold so we keep in line with the AIL
 * push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the transaction
 * is inserted into the CIL and the push has been triggered. It is largely a
 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
 * limit will be difficult to implement without introducing global serialisation
 * in the CIL commit fast path, and it's not at all clear that we actually need
 * such hard limits given the ~7 years we've run without a hard limit before
 * finding the first situation where a checkpoint size overflow actually
 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
 * we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
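
/*
 * Worked example (our arithmetic, assuming a v2 log where
 * BBTOB(XLOG_TOTAL_REC_SHIFT(log)) is the maximum 8 x 256KB = 2MB
 * iclog window):
 *
 *	64MB log:  min(64MB >> 3, 2MB << 4) = min(8MB, 32MB)   = 8MB
 *	2GB log:   min(2GB >> 3, 2MB << 4)  = min(256MB, 32MB) = 32MB
 *
 * so background pushes start at 8MB/32MB of CIL space respectively, and
 * committers start throttling at twice that.
 */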

/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number. Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;		/* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;	/* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	unsigned long		l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;	/* size of iclog header */
	int			l_iclog_heads;	/* # of iclog header sectors */
	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of iclog buffers in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;	/* start block of log */
	int			l_logsize;	/* size of log in bytes */
	int			l_logBBsize;	/* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;	/* head log queue */
	spinlock_t		l_icloglock;	/* grab to change iclog state */
	int			l_curr_cycle;	/* Cycle number of log writes */
	int			l_prev_cycle;	/* Cycle number before last
						 * block increment */
	int			l_curr_block;	/* current logical log block */
	int			l_prev_block;	/* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* The following field is used for debugging; need to hold icloglock */
#ifdef DEBUG
	void			*l_iclog_bak[XLOG_MAX_ICLOGS];
#endif
	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff;/* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error, and being
					   shutdown */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog	*log,
	int		unit_bytes,
	int		count,
	char		client,
	bool		permanent);

/*
 * Advance the write buffer cursor: move the data pointer forward by 'bytes'
 * and update the remaining length and current offset to match. Used while
 * formatting log iovecs into an iclog.
 */
static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct xfs_log_vec *log_vector, struct xlog_ticket *tic,
		uint optype);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
		xfs_lsn_t log_tail_lsn);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}
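
/*
 * e.g. a lockless sample of the current log tail (illustrative usage of
 * the helper above):
 *
 *	uint	tail_cycle, tail_block;
 *
 *	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_block);
 */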

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
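
/*
 * Illustrative sketch of how the crack/assign pair composes into a lockless
 * grant head update (a simplified version of the logic in xfs_log.c; the
 * function name is made up for this example):
 */
static inline void
xlog_grant_head_add_example(
	struct xlog	*log,
	atomic64_t	*head,
	int		bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	cycle, space, free;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		/* wrap the byte offset into the next cycle if we run out */
		free = log->l_logsize - space;
		if (bytes < free) {
			space += bytes;
		} else {
			space = bytes - free;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}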

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
			xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
			struct xlog_in_core *iclog);

/*
 * CIL force routines
 */
void xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
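
/*
 * Typical calling pattern (illustrative): check the sleep condition under
 * the lock, then let xlog_wait() drop it. The wakeup side holds the same
 * spinlock around its wakeup, so no wakeups can be lost:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state != XLOG_STATE_ACTIVE)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */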

int xlog_wait_on_iclog(struct xlog_in_core *iclog);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

#endif	/* __XFS_LOG_PRIV_H__ */