// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 19 | |
| 20 | /* |
| 21 | * metadata/data stored in disk with 4k size unit (a block) regardless |
| 22 | * underneath hardware sector size. only works with PAGE_SIZE == 4096 |
| 23 | */ |
| 24 | #define BLOCK_SECTORS (8) |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 25 | #define BLOCK_SECTOR_SHIFT (3) |
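
/*
 * Illustrative conversion (not in the original source): with 512-byte
 * sectors, one 4k block spans 8 sectors, so
 *	sectors = nblocks << BLOCK_SECTOR_SHIFT	(nblocks * 8)
 *	nblocks = sectors >> BLOCK_SECTOR_SHIFT	(sectors / 8)
 */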

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, the reclaim runs every log->max_free_space.
 * This prevents recovery scans from taking too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
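
/*
 * Worked example (illustrative): RECLAIM_MAX_FREE_SPACE is
 * 10 * 1024 * 1024 * 2 = 20971520 sectors of 512 bytes, i.e. 10GiB.
 * For a 16GiB journal device, min(16GiB / 4, 10GiB) gives
 * max_free_space = 4GiB.
 */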

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start a flush when this many full stripes are cached */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE 4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e.,
					 * reclaim doesn't wait for a specific
					 * io_unit to switch to
					 * IO_UNIT_STRIPE_END state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so such a chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. This count is tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be
 * 2b'00, so it is necessary to left shift the counter by 2 bits before
 * using it as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
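
/*
 * Illustrative encoding (not in the original source): a count of 3 is
 * stored as the pointer value (void *)(3UL << R5C_RADIX_COUNT_SHIFT),
 * i.e. 0xc, whose low 2 bits are zero as the radix tree requires.
 */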

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_div(sect, conf->chunk_sectors);
	return sect;
}
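
/*
 * Worked example (illustrative): with 256kB chunks, chunk_sectors is 512.
 * A sector of 1234 divides down to key 2, i.e. all sectors in the third
 * chunk (sectors 1024-1535) share one big_stripe counter.
 */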

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with normal writes; as we always
 * flush the log disk first and then start moving data to the raid disks,
 * there is no requirement to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted once
	 * it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * not accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};
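
/*
 * Note (summarizing the code below): states only advance, never go back;
 * __r5l_set_io_unit_state() warns and ignores any attempt to move an
 * io_unit to a state that is not strictly larger than its current one.
 */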

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}
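
/*
 * Illustrative example: the log is a ring of device_size sectors, so with
 * device_size == 1000, r5l_ring_add(log, 996, 8) wraps around to 4. This
 * assumes inc never exceeds device_size, so one subtraction suffices.
 */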

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}
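
/*
 * Illustrative example: with device_size == 1000, last_checkpoint == 900
 * and log_start == 100, the ring distance wraps to 200 used sectors, so a
 * request for 300 more sectors succeeds (1000 > 500). The strict "greater
 * than" keeps the log from ever filling completely, so a full ring can
 * never be mistaken for an empty one (head == tail).
 */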

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
		wbi2 = r5_next_bio(conf, wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					   RAID5_STRIPE_SECTORS(conf),
					   !test_bit(STRIPE_DEGRADED, &sh->state),
					   0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 * - stripe cache pressure high:
	 *	total_cached > 3/4 min_nr_stripes ||
	 *	empty_inactive_list_nr > 0
	 * - stripe cache pressure moderate:
	 *	total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flushes higher priority:
 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *    stripes ALREADY in journal can be flushed w/o pending writes;
 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *    can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * has already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So the total journal
 * space required to flush all cached stripes (in pages) is:
 *
 *	(stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *	(group_cnt + 1) * (raid_disks + 1)
 * or
 *	(stripe_in_journal_count) * (max_degraded + 1) +
 *	(group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
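
/*
 * Worked example (illustrative): for a RAID6 array with raid_disks == 8,
 * max_degraded == 2, group_cnt == 0 and 100 stripes in the journal, the
 * bound is 8 * (3 * 100 + 6 * 1) = 2448 sectors of journal space.
 */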

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Set proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, &log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io doesn't have null_flush or flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get error
	 * and calls endio, then active stripes will continue write
	 * process. Therefore, it is not necessary to check Faulty bit
	 * of journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int locked = 0;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait for superblock change before suspend */
	wait_event(mddev->sb_wait,
		   conf->log == NULL ||
		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
		    (locked = mddev_trylock(mddev))));
	if (locked) {
		mddev_suspend(mddev);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
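	/*
	 * Illustrative note: each data page carries its own payload header
	 * plus one checksum, while all parity pages share a single header
	 * followed by one checksum per page. r5l_get_meta() below starts a
	 * new io_unit if this much metadata no longer fits in the current
	 * meta page.
	 */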

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}
| 987 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 988 | /* |
| 989 | * running in raid5d, where reclaim could wait for raid5d too (when it flushes |
| 990 | * data from log to raid disks), so we shouldn't wait for reclaim here |
| 991 | */ |
| 992 | int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) |
| 993 | { |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 994 | struct r5conf *conf = sh->raid_conf; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 995 | int write_disks = 0; |
| 996 | int data_pages, parity_pages; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 997 | int reserve; |
| 998 | int i; |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 999 | int ret = 0; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1000 | bool wake_reclaim = false; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1001 | |
| 1002 | if (!log) |
| 1003 | return -EAGAIN; |
| 1004 | /* Don't support stripe batch */ |
| 1005 | if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || |
| 1006 | test_bit(STRIPE_SYNCING, &sh->state)) { |
| 1007 | /* the stripe is written to log, we start writing it to raid */ |
| 1008 | clear_bit(STRIPE_LOG_TRAPPED, &sh->state); |
| 1009 | return -EAGAIN; |
| 1010 | } |
| 1011 | |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 1012 | WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); |
| 1013 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1014 | for (i = 0; i < sh->disks; i++) { |
| 1015 | void *addr; |
| 1016 | |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 1017 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || |
| 1018 | test_bit(R5_InJournal, &sh->dev[i].flags)) |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1019 | continue; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 1020 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1021 | write_disks++; |
| 1022 | /* checksum is already calculated in last run */ |
| 1023 | if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) |
| 1024 | continue; |
| 1025 | addr = kmap_atomic(sh->dev[i].page); |
Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 1026 | sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, |
| 1027 | addr, PAGE_SIZE); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1028 | kunmap_atomic(addr); |
| 1029 | } |
| 1030 | parity_pages = 1 + !!(sh->qd_idx >= 0); |
| 1031 | data_pages = write_disks - parity_pages; |
| 1032 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1033 | set_bit(STRIPE_LOG_TRAPPED, &sh->state); |
Shaohua Li | 253f9fd4 | 2015-09-04 14:14:16 -0700 | [diff] [blame] | 1034 | /* |
| 1035 | * The stripe must enter the state machine again to finish the write, so |
| 1036 | * don't delay. |
| 1037 | */ |
| 1038 | clear_bit(STRIPE_DELAYED, &sh->state); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1039 | atomic_inc(&sh->count); |
| 1040 | |
| 1041 | mutex_lock(&log->io_mutex); |
| 1042 | /* meta + data */ |
| 1043 | reserve = (1 + write_disks) << (PAGE_SHIFT - 9); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1044 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1045 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { |
| 1046 | if (!r5l_has_free_space(log, reserve)) { |
| 1047 | r5l_add_no_space_stripe(log, sh); |
| 1048 | wake_reclaim = true; |
| 1049 | } else { |
| 1050 | ret = r5l_log_stripe(log, sh, data_pages, parity_pages); |
| 1051 | if (ret) { |
| 1052 | spin_lock_irq(&log->io_list_lock); |
| 1053 | list_add_tail(&sh->log_list, |
| 1054 | &log->no_mem_stripes); |
| 1055 | spin_unlock_irq(&log->io_list_lock); |
| 1056 | } |
| 1057 | } |
| 1058 | } else { /* R5C_JOURNAL_MODE_WRITE_BACK */ |
| 1059 | /* |
| 1060 | * log space is critical: do not process stripes that are |
| 1061 | * not yet in the cache (sh->log_start == MaxSector). |
| 1062 | */ |
| 1063 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && |
| 1064 | sh->log_start == MaxSector) { |
| 1065 | r5l_add_no_space_stripe(log, sh); |
| 1066 | wake_reclaim = true; |
| 1067 | reserve = 0; |
| 1068 | } else if (!r5l_has_free_space(log, reserve)) { |
| 1069 | if (sh->log_start == log->last_checkpoint) |
| 1070 | BUG(); |
| 1071 | else |
| 1072 | r5l_add_no_space_stripe(log, sh); |
| 1073 | } else { |
| 1074 | ret = r5l_log_stripe(log, sh, data_pages, parity_pages); |
| 1075 | if (ret) { |
| 1076 | spin_lock_irq(&log->io_list_lock); |
| 1077 | list_add_tail(&sh->log_list, |
| 1078 | &log->no_mem_stripes); |
| 1079 | spin_unlock_irq(&log->io_list_lock); |
| 1080 | } |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1081 | } |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1082 | } |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1083 | |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1084 | mutex_unlock(&log->io_mutex); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1085 | if (wake_reclaim) |
| 1086 | r5l_wake_reclaim(log, reserve); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1087 | return 0; |
| 1088 | } |
| 1089 | |
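| | /* |
| | * Illustration (not part of the driver; the helper name is hypothetical) of |
| | * how the reservation above is sized: one 4K meta block plus one 4K block |
| | * per written disk, expressed in 512-byte sectors. |
| | */ |
| | static inline int example_log_reserve_sectors(int write_disks) |
| | { |
| | 	/* (1 meta page + write_disks data/parity pages) in sectors */ |
| | 	return (1 + write_disks) << (PAGE_SHIFT - 9); |
| | } |
| | /* e.g. RAID6 with 3 dirty data disks + P + Q: (1 + 5) << 3 = 48 sectors */ |
| | |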
| 1090 | void r5l_write_stripe_run(struct r5l_log *log) |
| 1091 | { |
| 1092 | if (!log) |
| 1093 | return; |
| 1094 | mutex_lock(&log->io_mutex); |
| 1095 | r5l_submit_current_io(log); |
| 1096 | mutex_unlock(&log->io_mutex); |
| 1097 | } |
| 1098 | |
Shaohua Li | 828cbe9 | 2015-09-02 13:49:49 -0700 | [diff] [blame] | 1099 | int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) |
| 1100 | { |
Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 1101 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { |
| 1102 | /* |
| 1103 | * in write through (journal only) mode |
| 1104 | * we flush the log disk cache first, then write stripe data to the |
| 1105 | * raid disks. So if the bio has finished, the log disk cache is |
| 1106 | * already flushed. Recovery guarantees we can recover |
| 1107 | * the bio from the log disk, so we don't need to flush again |
| 1108 | */ |
| 1109 | if (bio->bi_iter.bi_size == 0) { |
| 1110 | bio_endio(bio); |
| 1111 | return 0; |
| 1112 | } |
| 1113 | bio->bi_opf &= ~REQ_PREFLUSH; |
| 1114 | } else { |
| 1115 | /* write back (with cache) */ |
| 1116 | if (bio->bi_iter.bi_size == 0) { |
| 1117 | mutex_lock(&log->io_mutex); |
| 1118 | r5l_get_meta(log, 0); |
| 1119 | bio_list_add(&log->current_io->flush_barriers, bio); |
| 1120 | log->current_io->has_flush = 1; |
| 1121 | log->current_io->has_null_flush = 1; |
| 1122 | atomic_inc(&log->current_io->pending_stripe); |
| 1123 | r5l_submit_current_io(log); |
| 1124 | mutex_unlock(&log->io_mutex); |
| 1125 | return 0; |
| 1126 | } |
Shaohua Li | 828cbe9 | 2015-09-02 13:49:49 -0700 | [diff] [blame] | 1127 | } |
Shaohua Li | 828cbe9 | 2015-09-02 13:49:49 -0700 | [diff] [blame] | 1128 | return -EAGAIN; |
| 1129 | } |
| 1130 | |
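| | /* |
| | * Recap of the two policies above: in write-through mode an empty |
| | * REQ_PREFLUSH bio completes immediately, because the log disk cache is |
| | * flushed before stripes go to the raid disks and recovery can replay |
| | * anything still in the journal. In write-back mode the empty flush bio |
| | * is attached to the current io_unit and completes only once that |
| | * io_unit is safe in the journal. Bios that carry data fall through |
| | * with -EAGAIN (write-through drops their REQ_PREFLUSH flag first). |
| | */ |
| | |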
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1131 | /* This will run after log space is reclaimed */ |
| 1132 | static void r5l_run_no_space_stripes(struct r5l_log *log) |
| 1133 | { |
| 1134 | struct stripe_head *sh; |
| 1135 | |
| 1136 | spin_lock(&log->no_space_stripes_lock); |
| 1137 | while (!list_empty(&log->no_space_stripes)) { |
| 1138 | sh = list_first_entry(&log->no_space_stripes, |
| 1139 | struct stripe_head, log_list); |
| 1140 | list_del_init(&sh->log_list); |
| 1141 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1142 | raid5_release_stripe(sh); |
| 1143 | } |
| 1144 | spin_unlock(&log->no_space_stripes_lock); |
| 1145 | } |
| 1146 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1147 | /* |
| 1148 | * calculate the new last_checkpoint: |
| 1149 | * for write through mode, return log->next_checkpoint |
| 1150 | * for write back, return the log_start of the first sh in stripe_in_journal_list |
| 1151 | */ |
| 1152 | static sector_t r5c_calculate_new_cp(struct r5conf *conf) |
| 1153 | { |
| 1154 | struct stripe_head *sh; |
| 1155 | struct r5l_log *log = conf->log; |
| 1156 | sector_t new_cp; |
| 1157 | unsigned long flags; |
| 1158 | |
| 1159 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) |
| 1160 | return log->next_checkpoint; |
| 1161 | |
| 1162 | spin_lock_irqsave(&log->stripe_in_journal_lock, flags); |
| 1163 | if (list_empty(&conf->log->stripe_in_journal_list)) { |
| 1164 | /* all stripes flushed */ |
Dan Carpenter | d3014e2 | 2016-11-24 14:13:04 +0300 | [diff] [blame] | 1165 | spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1166 | return log->next_checkpoint; |
| 1167 | } |
| 1168 | sh = list_first_entry(&conf->log->stripe_in_journal_list, |
| 1169 | struct stripe_head, r5c); |
| 1170 | new_cp = sh->log_start; |
| 1171 | spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); |
| 1172 | return new_cp; |
| 1173 | } |
| 1174 | |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1175 | static sector_t r5l_reclaimable_space(struct r5l_log *log) |
| 1176 | { |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1177 | struct r5conf *conf = log->rdev->mddev->private; |
| 1178 | |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1179 | return r5l_ring_distance(log, log->last_checkpoint, |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1180 | r5c_calculate_new_cp(conf)); |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1181 | } |
| 1182 | |
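| | /* |
| | * A standalone sketch (hypothetical helper) of the circular-log distance |
| | * that r5l_ring_distance computes for the call above: the span from |
| | * "start" to "end", wrapping at device_size. E.g. device_size = 1024, |
| | * start = 1000, end = 8 gives 32 sectors of reclaimable space. |
| | */ |
| | static inline sector_t example_ring_distance(sector_t device_size, |
| | 	sector_t start, sector_t end) |
| | { |
| | 	return end >= start ? end - start : end + device_size - start; |
| | } |
| | |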
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1183 | static void r5l_run_no_mem_stripe(struct r5l_log *log) |
| 1184 | { |
| 1185 | struct stripe_head *sh; |
| 1186 | |
Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1187 | lockdep_assert_held(&log->io_list_lock); |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1188 | |
| 1189 | if (!list_empty(&log->no_mem_stripes)) { |
| 1190 | sh = list_first_entry(&log->no_mem_stripes, |
| 1191 | struct stripe_head, log_list); |
| 1192 | list_del_init(&sh->log_list); |
| 1193 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1194 | raid5_release_stripe(sh); |
| 1195 | } |
| 1196 | } |
| 1197 | |
Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1198 | static bool r5l_complete_finished_ios(struct r5l_log *log) |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1199 | { |
| 1200 | struct r5l_io_unit *io, *next; |
| 1201 | bool found = false; |
| 1202 | |
Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1203 | lockdep_assert_held(&log->io_list_lock); |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1204 | |
Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1205 | list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1206 | /* don't change list order */ |
| 1207 | if (io->state < IO_UNIT_STRIPE_END) |
| 1208 | break; |
| 1209 | |
| 1210 | log->next_checkpoint = io->log_start; |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1211 | |
| 1212 | list_del(&io->log_sibling); |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 1213 | mempool_free(io, &log->io_pool); |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1214 | r5l_run_no_mem_stripe(log); |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1215 | |
| 1216 | found = true; |
| 1217 | } |
| 1218 | |
| 1219 | return found; |
| 1220 | } |
| 1221 | |
Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1222 | static void __r5l_stripe_write_finished(struct r5l_io_unit *io) |
| 1223 | { |
| 1224 | struct r5l_log *log = io->log; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1225 | struct r5conf *conf = log->rdev->mddev->private; |
Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1226 | unsigned long flags; |
| 1227 | |
| 1228 | spin_lock_irqsave(&log->io_list_lock, flags); |
| 1229 | __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1230 | |
Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1231 | if (!r5l_complete_finished_ios(log)) { |
Shaohua Li | 85f2f9a | 2015-09-04 14:14:05 -0700 | [diff] [blame] | 1232 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
| 1233 | return; |
| 1234 | } |
Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1235 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1236 | if (r5l_reclaimable_space(log) > log->max_free_space || |
| 1237 | test_bit(R5C_LOG_TIGHT, &conf->cache_state)) |
Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1238 | r5l_wake_reclaim(log, 0); |
| 1239 | |
Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1240 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
| 1241 | wake_up(&log->iounit_wait); |
| 1242 | } |
| 1243 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1244 | void r5l_stripe_write_finished(struct stripe_head *sh) |
| 1245 | { |
| 1246 | struct r5l_io_unit *io; |
| 1247 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1248 | io = sh->log_io; |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1249 | sh->log_io = NULL; |
| 1250 | |
Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1251 | if (io && atomic_dec_and_test(&io->pending_stripe)) |
| 1252 | __r5l_stripe_write_finished(io); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1253 | } |
| 1254 | |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1255 | static void r5l_log_flush_endio(struct bio *bio) |
| 1256 | { |
| 1257 | struct r5l_log *log = container_of(bio, struct r5l_log, |
| 1258 | flush_bio); |
| 1259 | unsigned long flags; |
| 1260 | struct r5l_io_unit *io; |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1261 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1262 | if (bio->bi_status) |
Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1263 | md_error(log->rdev->mddev, log->rdev); |
| 1264 | |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1265 | spin_lock_irqsave(&log->io_list_lock, flags); |
Christoph Hellwig | d8858f4 | 2015-10-05 09:31:08 +0200 | [diff] [blame] | 1266 | list_for_each_entry(io, &log->flushing_ios, log_sibling) |
| 1267 | r5l_io_run_stripes(io); |
Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1268 | list_splice_tail_init(&log->flushing_ios, &log->finished_ios); |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1269 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
| 1270 | } |
| 1271 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1272 | /* |
| 1273 | * Start dispatching IO to the raid disks. |
| 1274 | * An io_unit (meta) is one unit of the log. There is one situation we want to |
| 1275 | * avoid: a broken meta block in the middle of the log prevents recovery from |
| 1276 | * finding the meta at the head of the log. So if an operation requires the |
| 1277 | * meta at the head to be persistent in the log, we must make sure all meta |
| 1278 | * before it is persistent in the log too. A case is: |
| 1279 | * |
| 1280 | * stripe data/parity is in the log, and we start writing the stripe to the |
| 1281 | * raid disks. The stripe data/parity must be persistent in the log before we |
| 1282 | * do the write to the raid disks. |
| 1283 | * |
| 1284 | * The solution is to strictly maintain io_unit list order: we only write the |
| | * stripes of an io_unit to the raid disks once that io_unit is the first one |
| | * whose data/parity is persistent in the log. |
| 1285 | */ |
| 1286 | void r5l_flush_stripe_to_raid(struct r5l_log *log) |
| 1287 | { |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1288 | bool do_flush; |
Christoph Hellwig | 56fef7c | 2015-10-05 09:31:09 +0200 | [diff] [blame] | 1289 | |
| 1290 | if (!log || !log->need_cache_flush) |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1291 | return; |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1292 | |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1293 | spin_lock_irq(&log->io_list_lock); |
| 1294 | /* flush bio is running */ |
| 1295 | if (!list_empty(&log->flushing_ios)) { |
| 1296 | spin_unlock_irq(&log->io_list_lock); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1297 | return; |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1298 | } |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1299 | list_splice_tail_init(&log->io_end_ios, &log->flushing_ios); |
| 1300 | do_flush = !list_empty(&log->flushing_ios); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1301 | spin_unlock_irq(&log->io_list_lock); |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1302 | |
| 1303 | if (!do_flush) |
| 1304 | return; |
| 1305 | bio_reset(&log->flush_bio); |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1306 | bio_set_dev(&log->flush_bio, log->rdev->bdev); |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1307 | log->flush_bio.bi_end_io = r5l_log_flush_endio; |
Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 1308 | log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; |
Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 1309 | submit_bio(&log->flush_bio); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1310 | } |
| 1311 | |
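| | /* |
| | * A minimal sketch of the ordering rule above (example types, not driver |
| | * code): walk io_units in log order and stop at the first one whose |
| | * data/parity is not yet persistent; no stripe at or after that point may |
| | * be written to the raid disks. |
| | */ |
| | struct example_io_unit { |
| | 	struct list_head log_sibling; |
| | 	bool in_log; /* data/parity persistent in the log? */ |
| | }; |
| | |
| | static struct example_io_unit * |
| | example_first_unflushed(struct list_head *ios) |
| | { |
| | 	struct example_io_unit *io; |
| | |
| | 	list_for_each_entry(io, ios, log_sibling) |
| | 		if (!io->in_log) |
| | 			return io; |
| | 	return NULL; /* everything persistent; all stripes may go out */ |
| | } |
| | |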
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1312 | static void r5l_write_super(struct r5l_log *log, sector_t cp); |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1313 | static void r5l_write_super_and_discard_space(struct r5l_log *log, |
| 1314 | sector_t end) |
| 1315 | { |
| 1316 | struct block_device *bdev = log->rdev->bdev; |
| 1317 | struct mddev *mddev; |
| 1318 | |
| 1319 | r5l_write_super(log, end); |
| 1320 | |
| 1321 | if (!blk_queue_discard(bdev_get_queue(bdev))) |
| 1322 | return; |
| 1323 | |
| 1324 | mddev = log->rdev->mddev; |
| 1325 | /* |
Shaohua Li | 8e018c2 | 2016-08-25 10:09:39 -0700 | [diff] [blame] | 1326 | * Discard could zero data, so before discarding we must make sure the |
| 1327 | * superblock is updated to the new log tail. Updating the superblock (either |
| 1328 | * by calling md_update_sb() directly or by depending on the md thread) must |
| 1329 | * hold the reconfig mutex. On the other hand, raid5_quiesce is called with |
| 1330 | * reconfig_mutex held. The first step of raid5_quiesce() is waiting |
| 1331 | * for all IO to finish, hence waiting for the reclaim thread, while the |
| 1332 | * reclaim thread is calling this function and waiting for the reconfig |
| 1333 | * mutex. So there is a deadlock. We work around this issue with a trylock. |
| 1334 | * FIXME: we could miss a discard if we can't take the reconfig mutex |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1335 | */ |
Shaohua Li | 2953079 | 2016-12-08 15:48:19 -0800 | [diff] [blame] | 1336 | set_mask_bits(&mddev->sb_flags, 0, |
| 1337 | BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); |
Shaohua Li | 8e018c2 | 2016-08-25 10:09:39 -0700 | [diff] [blame] | 1338 | if (!mddev_trylock(mddev)) |
| 1339 | return; |
| 1340 | md_update_sb(mddev, 1); |
| 1341 | mddev_unlock(mddev); |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1342 | |
Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1343 | /* discard IO error really doesn't matter, ignore it */ |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1344 | if (log->last_checkpoint < end) { |
| 1345 | blkdev_issue_discard(bdev, |
| 1346 | log->last_checkpoint + log->rdev->data_offset, |
| 1347 | end - log->last_checkpoint, GFP_NOIO, 0); |
| 1348 | } else { |
| 1349 | blkdev_issue_discard(bdev, |
| 1350 | log->last_checkpoint + log->rdev->data_offset, |
| 1351 | log->device_size - log->last_checkpoint, |
| 1352 | GFP_NOIO, 0); |
| 1353 | blkdev_issue_discard(bdev, log->rdev->data_offset, end, |
| 1354 | GFP_NOIO, 0); |
| 1355 | } |
| 1356 | } |
| 1357 | |
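| | /* |
| | * Worked example of the wrap-around case above: with device_size = 1000 |
| | * sectors, last_checkpoint = 900 and end = 100, the reclaimed region |
| | * wraps, so two discards are issued: [900, 1000) at the end of the log |
| | * area and [0, 100) at its front, both shifted by rdev->data_offset. |
| | */ |
| | |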
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1358 | /* |
| 1359 | * r5c_flush_stripe moves a stripe from the cached list to handle_list. When |
| 1360 | * called, the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes. |
| 1361 | * |
| 1362 | * must be called with conf->device_lock held |
| 1363 | */ |
| 1364 | static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh) |
| 1365 | { |
| 1366 | BUG_ON(list_empty(&sh->lru)); |
| 1367 | BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); |
| 1368 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); |
| 1369 | |
| 1370 | /* |
| 1371 | * The stripe is not ON_RELEASE_LIST, so it is safe to call |
| 1372 | * raid5_release_stripe() while holding conf->device_lock |
| 1373 | */ |
| 1374 | BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); |
Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1375 | lockdep_assert_held(&conf->device_lock); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1376 | |
| 1377 | list_del_init(&sh->lru); |
| 1378 | atomic_inc(&sh->count); |
| 1379 | |
| 1380 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1381 | atomic_inc(&conf->active_stripes); |
| 1382 | r5c_make_stripe_write_out(sh); |
| 1383 | |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1384 | if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) |
| 1385 | atomic_inc(&conf->r5c_flushing_partial_stripes); |
| 1386 | else |
| 1387 | atomic_inc(&conf->r5c_flushing_full_stripes); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1388 | raid5_release_stripe(sh); |
| 1389 | } |
| 1390 | |
| 1391 | /* |
| 1392 | * if num == 0, flush all full stripes |
| 1393 | * if num > 0, flush all full stripes. If fewer than num full stripes are |
| 1394 | * flushed, flush some partial stripes until a total of num stripes are |
| 1395 | * flushed or there are no more cached stripes. |
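| | * |
| | * Example (illustrative): r5c_flush_cache(conf, 8) flushes every stripe on |
| | * r5c_full_stripe_list; if that covers only 3 stripes, it then flushes up |
| | * to 5 more from r5c_partial_stripe_list. |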
| 1396 | */ |
| 1397 | void r5c_flush_cache(struct r5conf *conf, int num) |
| 1398 | { |
| 1399 | int count; |
| 1400 | struct stripe_head *sh, *next; |
| 1401 | |
Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1402 | lockdep_assert_held(&conf->device_lock); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1403 | if (!conf->log) |
| 1404 | return; |
| 1405 | |
| 1406 | count = 0; |
| 1407 | list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) { |
| 1408 | r5c_flush_stripe(conf, sh); |
| 1409 | count++; |
| 1410 | } |
| 1411 | |
| 1412 | if (count >= num) |
| 1413 | return; |
| 1414 | list_for_each_entry_safe(sh, next, |
| 1415 | &conf->r5c_partial_stripe_list, lru) { |
| 1416 | r5c_flush_stripe(conf, sh); |
| 1417 | if (++count >= num) |
| 1418 | break; |
| 1419 | } |
| 1420 | } |
| 1421 | |
| 1422 | static void r5c_do_reclaim(struct r5conf *conf) |
| 1423 | { |
| 1424 | struct r5l_log *log = conf->log; |
| 1425 | struct stripe_head *sh; |
| 1426 | int count = 0; |
| 1427 | unsigned long flags; |
| 1428 | int total_cached; |
| 1429 | int stripes_to_flush; |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1430 | int flushing_partial, flushing_full; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1431 | |
| 1432 | if (!r5c_is_writeback(log)) |
| 1433 | return; |
| 1434 | |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1435 | flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes); |
| 1436 | flushing_full = atomic_read(&conf->r5c_flushing_full_stripes); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1437 | total_cached = atomic_read(&conf->r5c_cached_partial_stripes) + |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1438 | atomic_read(&conf->r5c_cached_full_stripes) - |
| 1439 | flushing_full - flushing_partial; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1440 | |
| 1441 | if (total_cached > conf->min_nr_stripes * 3 / 4 || |
| 1442 | atomic_read(&conf->empty_inactive_list_nr) > 0) |
| 1443 | /* |
| 1444 | * if stripe cache pressure is high, flush all full stripes and |
| 1445 | * some partial stripes |
| 1446 | */ |
| 1447 | stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP; |
| 1448 | else if (total_cached > conf->min_nr_stripes * 1 / 2 || |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1449 | atomic_read(&conf->r5c_cached_full_stripes) - flushing_full > |
Shaohua Li | 84890c0 | 2017-02-15 19:58:05 -0800 | [diff] [blame] | 1450 | R5C_FULL_STRIPE_FLUSH_BATCH(conf)) |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1451 | /* |
| 1452 | * if stripe cache pressure is moderate, or if there are many full |
| 1453 | * stripes, flush all full stripes |
| 1454 | */ |
| 1455 | stripes_to_flush = 0; |
| 1456 | else |
| 1457 | /* no need to flush */ |
| 1458 | stripes_to_flush = -1; |
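| | |
| | /* |
| | * Example: with min_nr_stripes = 256 and max_nr_stripes = 256, a group |
| | * flush triggers above 192 cached stripes (or whenever an inactive list |
| | * is empty); above 128 cached stripes, or with more than 64 cached full |
| | * stripes (R5C_FULL_STRIPE_FLUSH_BATCH), only the full stripes are |
| | * flushed. |
| | */ |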
| 1459 | |
| 1460 | if (stripes_to_flush >= 0) { |
| 1461 | spin_lock_irqsave(&conf->device_lock, flags); |
| 1462 | r5c_flush_cache(conf, stripes_to_flush); |
| 1463 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 1464 | } |
| 1465 | |
| 1466 | /* if log space is tight, flush stripes on stripe_in_journal_list */ |
| 1467 | if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) { |
| 1468 | spin_lock_irqsave(&log->stripe_in_journal_lock, flags); |
| 1469 | spin_lock(&conf->device_lock); |
| 1470 | list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { |
| 1471 | /* |
| 1472 | * stripes on stripe_in_journal_list could be in any |
| 1473 | * state of the stripe_cache state machine. In this |
| 1474 | * case, we only want to flush stripes on |
| 1475 | * r5c_cached_full/partial_stripes. The following |
| 1476 | * condition makes sure the stripe is on one of the |
| 1477 | * two lists. |
| 1478 | */ |
| 1479 | if (!list_empty(&sh->lru) && |
| 1480 | !test_bit(STRIPE_HANDLE, &sh->state) && |
| 1481 | atomic_read(&sh->count) == 0) { |
| 1482 | r5c_flush_stripe(conf, sh); |
Shaohua Li | e8fd52e | 2017-02-10 16:18:08 -0800 | [diff] [blame] | 1483 | if (count++ >= R5C_RECLAIM_STRIPE_GROUP) |
| 1484 | break; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1485 | } |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1486 | } |
| 1487 | spin_unlock(&conf->device_lock); |
| 1488 | spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); |
| 1489 | } |
Song Liu | f687a33 | 2016-11-30 16:57:54 -0800 | [diff] [blame] | 1490 | |
| 1491 | if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) |
| 1492 | r5l_run_no_space_stripes(log); |
| 1493 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1494 | md_wakeup_thread(conf->mddev->thread); |
| 1495 | } |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1496 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1497 | static void r5l_do_reclaim(struct r5l_log *log) |
| 1498 | { |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1499 | struct r5conf *conf = log->rdev->mddev->private; |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1500 | sector_t reclaim_target = xchg(&log->reclaim_target, 0); |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1501 | sector_t reclaimable; |
| 1502 | sector_t next_checkpoint; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1503 | bool write_super; |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1504 | |
| 1505 | spin_lock_irq(&log->io_list_lock); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1506 | write_super = r5l_reclaimable_space(log) > log->max_free_space || |
| 1507 | reclaim_target != 0 || !list_empty(&log->no_space_stripes); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1508 | /* |
| 1509 | * move the proper io_units to the reclaim list. We should not change the |
| 1510 | * order: reclaimable and unreclaimable io_units can be mixed in the list, |
| 1511 | * and we shouldn't reuse the space of an unreclaimable io_unit |
| 1512 | */ |
| 1513 | while (1) { |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1514 | reclaimable = r5l_reclaimable_space(log); |
| 1515 | if (reclaimable >= reclaim_target || |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1516 | (list_empty(&log->running_ios) && |
| 1517 | list_empty(&log->io_end_ios) && |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1518 | list_empty(&log->flushing_ios) && |
Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1519 | list_empty(&log->finished_ios))) |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1520 | break; |
| 1521 | |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1522 | md_wakeup_thread(log->rdev->mddev->thread); |
| 1523 | wait_event_lock_irq(log->iounit_wait, |
| 1524 | r5l_reclaimable_space(log) > reclaimable, |
| 1525 | log->io_list_lock); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1526 | } |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1527 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1528 | next_checkpoint = r5c_calculate_new_cp(conf); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1529 | spin_unlock_irq(&log->io_list_lock); |
| 1530 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1531 | if (reclaimable == 0 || !write_super) |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1532 | return; |
| 1533 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1534 | /* |
| 1535 | * write_super will flush the cache of each raid disk. We must write super |
| 1536 | * here, because the log area might be reused soon and we don't want to |
| 1537 | * confuse recovery |
| 1538 | */ |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1539 | r5l_write_super_and_discard_space(log, next_checkpoint); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1540 | |
| 1541 | mutex_lock(&log->io_mutex); |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1542 | log->last_checkpoint = next_checkpoint; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1543 | r5c_update_log_state(log); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1544 | mutex_unlock(&log->io_mutex); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1545 | |
Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1546 | r5l_run_no_space_stripes(log); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1547 | } |
| 1548 | |
| 1549 | static void r5l_reclaim_thread(struct md_thread *thread) |
| 1550 | { |
| 1551 | struct mddev *mddev = thread->mddev; |
| 1552 | struct r5conf *conf = mddev->private; |
| 1553 | struct r5l_log *log = conf->log; |
| 1554 | |
| 1555 | if (!log) |
| 1556 | return; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1557 | r5c_do_reclaim(conf); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1558 | r5l_do_reclaim(log); |
| 1559 | } |
| 1560 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1561 | void r5l_wake_reclaim(struct r5l_log *log, sector_t space) |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1562 | { |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1563 | unsigned long target; |
| 1564 | unsigned long new = (unsigned long)space; /* overflow in theory */ |
| 1565 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1566 | if (!log) |
| 1567 | return; |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1568 | do { |
| 1569 | target = log->reclaim_target; |
| 1570 | if (new < target) |
| 1571 | return; |
| 1572 | } while (cmpxchg(&log->reclaim_target, target, new) != target); |
| 1573 | md_wakeup_thread(log->reclaim_thread); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1574 | } |
| 1575 | |
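| | /* |
| | * The cmpxchg loop above is a lock-free "monotonic max": concurrent |
| | * callers race to raise reclaim_target, and a smaller request simply |
| | * gives up. A hypothetical generic form of the same pattern: |
| | */ |
| | static inline void example_raise_target(unsigned long *target, |
| | 	unsigned long new) |
| | { |
| | 	unsigned long old; |
| | |
| | 	do { |
| | 		old = READ_ONCE(*target); |
| | 		if (new < old) |
| | 			return; /* someone already requested more */ |
| | 	} while (cmpxchg(target, old, new) != old); |
| | } |
| | |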
NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1576 | void r5l_quiesce(struct r5l_log *log, int quiesce) |
Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1577 | { |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1578 | struct mddev *mddev; |
NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1579 | |
| 1580 | if (quiesce) { |
Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1581 | /* make sure r5l_write_super_and_discard_space exits */ |
| 1582 | mddev = log->rdev->mddev; |
| 1583 | wake_up(&mddev->sb_wait); |
Shaohua Li | ce1ccd0 | 2016-11-21 10:29:18 -0800 | [diff] [blame] | 1584 | kthread_park(log->reclaim_thread->tsk); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1585 | r5l_wake_reclaim(log, MaxSector); |
Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1586 | r5l_do_reclaim(log); |
NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1587 | } else |
| 1588 | kthread_unpark(log->reclaim_thread->tsk); |
Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1589 | } |
| 1590 | |
Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1591 | bool r5l_log_disk_error(struct r5conf *conf) |
| 1592 | { |
Shaohua Li | f6b6ec5 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1593 | struct r5l_log *log; |
| 1594 | bool ret; |
Shaohua Li | 7dde2ad | 2015-10-08 21:54:10 -0700 | [diff] [blame] | 1595 | /* don't allow write if journal disk is missing */ |
Shaohua Li | f6b6ec5 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1596 | rcu_read_lock(); |
| 1597 | log = rcu_dereference(conf->log); |
| 1598 | |
| 1599 | if (!log) |
| 1600 | ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); |
| 1601 | else |
| 1602 | ret = test_bit(Faulty, &log->rdev->flags); |
| 1603 | rcu_read_unlock(); |
| 1604 | return ret; |
Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1605 | } |
| 1606 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1607 | #define R5L_RECOVERY_PAGE_POOL_SIZE 256 |
| 1608 | |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1609 | struct r5l_recovery_ctx { |
| 1610 | struct page *meta_page; /* current meta */ |
| 1611 | sector_t meta_total_blocks; /* total size of current meta and data */ |
| 1612 | sector_t pos; /* recovery position */ |
| 1613 | u64 seq; /* recovery position seq */ |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1614 | int data_parity_stripes; /* number of data_parity stripes */ |
| 1615 | int data_only_stripes; /* number of data_only stripes */ |
| 1616 | struct list_head cached_list; |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1617 | |
| 1618 | /* |
| 1619 | * read ahead page pool (ra_pool) |
| 1620 | * in recovery, the log is read sequentially. It is not efficient to |
| 1621 | * read every page with sync_page_io(). The read ahead page pool |
| 1622 | * reads multiple pages with one IO, so further log reads can |
| 1623 | * just copy data from the pool. |
| 1624 | */ |
| 1625 | struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE]; |
| 1626 | sector_t pool_offset; /* offset of first page in the pool */ |
| 1627 | int total_pages; /* total allocated pages */ |
| 1628 | int valid_pages; /* pages with valid data */ |
| 1629 | struct bio *ra_bio; /* bio to do the read ahead */ |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1630 | }; |
| 1631 | |
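| | /* |
| | * Pool geometry: with R5L_RECOVERY_PAGE_POOL_SIZE = 256 and 4K pages, one |
| | * refill of ra_pool reads up to 1 MiB (2048 sectors) of the journal with |
| | * a single bio instead of 256 separate sync_page_io() calls. |
| | */ |
| | |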
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1632 | static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, |
| 1633 | struct r5l_recovery_ctx *ctx) |
| 1634 | { |
| 1635 | struct page *page; |
| 1636 | |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 1637 | ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs); |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1638 | if (!ctx->ra_bio) |
| 1639 | return -ENOMEM; |
| 1640 | |
| 1641 | ctx->valid_pages = 0; |
| 1642 | ctx->total_pages = 0; |
| 1643 | while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) { |
| 1644 | page = alloc_page(GFP_KERNEL); |
| 1645 | |
| 1646 | if (!page) |
| 1647 | break; |
| 1648 | ctx->ra_pool[ctx->total_pages] = page; |
| 1649 | ctx->total_pages += 1; |
| 1650 | } |
| 1651 | |
| 1652 | if (ctx->total_pages == 0) { |
| 1653 | bio_put(ctx->ra_bio); |
| 1654 | return -ENOMEM; |
| 1655 | } |
| 1656 | |
| 1657 | ctx->pool_offset = 0; |
| 1658 | return 0; |
| 1659 | } |
| 1660 | |
| 1661 | static void r5l_recovery_free_ra_pool(struct r5l_log *log, |
| 1662 | struct r5l_recovery_ctx *ctx) |
| 1663 | { |
| 1664 | int i; |
| 1665 | |
| 1666 | for (i = 0; i < ctx->total_pages; ++i) |
| 1667 | put_page(ctx->ra_pool[i]); |
| 1668 | bio_put(ctx->ra_bio); |
| 1669 | } |
| 1670 | |
| 1671 | /* |
| 1672 | * fetch pages into the pool starting at offset, setting ctx->valid_pages. |
| 1673 | * In normal cases, ctx->valid_pages == ctx->total_pages after the call. |
| 1674 | * However, if the offset is close to the end of the journal device, |
| 1675 | * ctx->valid_pages could be smaller than ctx->total_pages |
| 1676 | */ |
| 1677 | static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, |
| 1678 | struct r5l_recovery_ctx *ctx, |
| 1679 | sector_t offset) |
| 1680 | { |
| 1681 | bio_reset(ctx->ra_bio); |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1682 | bio_set_dev(ctx->ra_bio, log->rdev->bdev); |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1683 | bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); |
| 1684 | ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; |
| 1685 | |
| 1686 | ctx->valid_pages = 0; |
| 1687 | ctx->pool_offset = offset; |
| 1688 | |
| 1689 | while (ctx->valid_pages < ctx->total_pages) { |
| 1690 | bio_add_page(ctx->ra_bio, |
| 1691 | ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0); |
| 1692 | ctx->valid_pages += 1; |
| 1693 | |
| 1694 | offset = r5l_ring_add(log, offset, BLOCK_SECTORS); |
| 1695 | |
| 1696 | if (offset == 0) /* reached end of the device */ |
| 1697 | break; |
| 1698 | } |
| 1699 | |
| 1700 | return submit_bio_wait(ctx->ra_bio); |
| 1701 | } |
| 1702 | |
| 1703 | /* |
| 1704 | * try to read a page from the read ahead page pool; if the page is not in |
| 1705 | * the pool, call r5l_recovery_fetch_ra_pool to refill the pool first |
| 1706 | */ |
| 1707 | static int r5l_recovery_read_page(struct r5l_log *log, |
| 1708 | struct r5l_recovery_ctx *ctx, |
| 1709 | struct page *page, |
| 1710 | sector_t offset) |
| 1711 | { |
| 1712 | int ret; |
| 1713 | |
| 1714 | if (offset < ctx->pool_offset || |
| 1715 | offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) { |
| 1716 | ret = r5l_recovery_fetch_ra_pool(log, ctx, offset); |
| 1717 | if (ret) |
| 1718 | return ret; |
| 1719 | } |
| 1720 | |
| 1721 | BUG_ON(offset < ctx->pool_offset || |
| 1722 | offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS); |
| 1723 | |
| 1724 | memcpy(page_address(page), |
| 1725 | page_address(ctx->ra_pool[(offset - ctx->pool_offset) >> |
| 1726 | BLOCK_SECTOR_SHIFT]), |
| 1727 | PAGE_SIZE); |
| 1728 | return 0; |
| 1729 | } |
| 1730 | |
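| | /* |
| | * The index arithmetic above in numbers: with pool_offset = 1024 and |
| | * offset = 1040, the copy comes from ra_pool[(1040 - 1024) >> 3], i.e. |
| | * ra_pool[2], the third cached 4K block. |
| | */ |
| | |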
Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1731 | static int r5l_recovery_read_meta_block(struct r5l_log *log, |
| 1732 | struct r5l_recovery_ctx *ctx) |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1733 | { |
| 1734 | struct page *page = ctx->meta_page; |
| 1735 | struct r5l_meta_block *mb; |
| 1736 | u32 crc, stored_crc; |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1737 | int ret; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1738 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1739 | ret = r5l_recovery_read_page(log, ctx, page, ctx->pos); |
| 1740 | if (ret != 0) |
| 1741 | return ret; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1742 | |
| 1743 | mb = page_address(page); |
| 1744 | stored_crc = le32_to_cpu(mb->checksum); |
| 1745 | mb->checksum = 0; |
| 1746 | |
| 1747 | if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || |
| 1748 | le64_to_cpu(mb->seq) != ctx->seq || |
| 1749 | mb->version != R5LOG_VERSION || |
| 1750 | le64_to_cpu(mb->position) != ctx->pos) |
| 1751 | return -EINVAL; |
| 1752 | |
Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 1753 | crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1754 | if (stored_crc != crc) |
| 1755 | return -EINVAL; |
| 1756 | |
| 1757 | if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) |
| 1758 | return -EINVAL; |
| 1759 | |
| 1760 | ctx->meta_total_blocks = BLOCK_SECTORS; |
| 1761 | |
| 1762 | return 0; |
| 1763 | } |
| 1764 | |
Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1765 | static void |
| 1766 | r5l_recovery_create_empty_meta_block(struct r5l_log *log, |
| 1767 | struct page *page, |
| 1768 | sector_t pos, u64 seq) |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1769 | { |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1770 | struct r5l_meta_block *mb; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1771 | |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1772 | mb = page_address(page); |
Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1773 | clear_page(mb); |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1774 | mb->magic = cpu_to_le32(R5LOG_MAGIC); |
| 1775 | mb->version = R5LOG_VERSION; |
| 1776 | mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); |
| 1777 | mb->seq = cpu_to_le64(seq); |
| 1778 | mb->position = cpu_to_le64(pos); |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1779 | } |
| 1780 | |
| 1781 | static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, |
| 1782 | u64 seq) |
| 1783 | { |
| 1784 | struct page *page; |
| 1785 | struct r5l_meta_block *mb; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1786 | |
Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1787 | page = alloc_page(GFP_KERNEL); |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1788 | if (!page) |
| 1789 | return -ENOMEM; |
Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1790 | r5l_recovery_create_empty_meta_block(log, page, pos, seq); |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1791 | mb = page_address(page); |
Song Liu | 5c88f40 | 2016-12-07 09:42:05 -0800 | [diff] [blame] | 1792 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, |
| 1793 | mb, PAGE_SIZE)); |
Mike Christie | 796a5cf | 2016-06-05 14:32:07 -0500 | [diff] [blame] | 1794 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, |
Jan Kara | 5a8948f | 2017-05-31 09:44:33 +0200 | [diff] [blame] | 1795 | REQ_SYNC | REQ_FUA, false)) { |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1796 | __free_page(page); |
| 1797 | return -EIO; |
| 1798 | } |
| 1799 | __free_page(page); |
| 1800 | return 0; |
| 1801 | } |
| 1802 | |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1803 | /* |
| 1804 | * r5l_recovery_load_data and r5l_recovery_load_parity use the flag R5_Wantwrite |
| 1805 | * to mark valid (potentially not flushed) data in the journal. |
| 1806 | * |
| 1807 | * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb, |
| 1808 | * so there should not be any mismatch here. |
| 1809 | */ |
| 1810 | static void r5l_recovery_load_data(struct r5l_log *log, |
| 1811 | struct stripe_head *sh, |
| 1812 | struct r5l_recovery_ctx *ctx, |
| 1813 | struct r5l_payload_data_parity *payload, |
| 1814 | sector_t log_offset) |
| 1815 | { |
| 1816 | struct mddev *mddev = log->rdev->mddev; |
| 1817 | struct r5conf *conf = mddev->private; |
| 1818 | int dd_idx; |
| 1819 | |
| 1820 | raid5_compute_sector(conf, |
| 1821 | le64_to_cpu(payload->location), 0, |
| 1822 | &dd_idx, sh); |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1823 | r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1824 | sh->dev[dd_idx].log_checksum = |
| 1825 | le32_to_cpu(payload->checksum[0]); |
| 1826 | ctx->meta_total_blocks += BLOCK_SECTORS; |
| 1827 | |
| 1828 | set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); |
| 1829 | set_bit(STRIPE_R5C_CACHING, &sh->state); |
| 1830 | } |
| 1831 | |
| 1832 | static void r5l_recovery_load_parity(struct r5l_log *log, |
| 1833 | struct stripe_head *sh, |
| 1834 | struct r5l_recovery_ctx *ctx, |
| 1835 | struct r5l_payload_data_parity *payload, |
| 1836 | sector_t log_offset) |
| 1837 | { |
| 1838 | struct mddev *mddev = log->rdev->mddev; |
| 1839 | struct r5conf *conf = mddev->private; |
| 1840 | |
| 1841 | ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1842 | r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1843 | sh->dev[sh->pd_idx].log_checksum = |
| 1844 | le32_to_cpu(payload->checksum[0]); |
| 1845 | set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); |
| 1846 | |
| 1847 | if (sh->qd_idx >= 0) { |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1848 | r5l_recovery_read_page( |
| 1849 | log, ctx, sh->dev[sh->qd_idx].page, |
| 1850 | r5l_ring_add(log, log_offset, BLOCK_SECTORS)); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1851 | sh->dev[sh->qd_idx].log_checksum = |
| 1852 | le32_to_cpu(payload->checksum[1]); |
| 1853 | set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); |
| 1854 | } |
| 1855 | clear_bit(STRIPE_R5C_CACHING, &sh->state); |
| 1856 | } |
| 1857 | |
| 1858 | static void r5l_recovery_reset_stripe(struct stripe_head *sh) |
| 1859 | { |
| 1860 | int i; |
| 1861 | |
| 1862 | sh->state = 0; |
| 1863 | sh->log_start = MaxSector; |
| 1864 | for (i = sh->disks; i--; ) |
| 1865 | sh->dev[i].flags = 0; |
| 1866 | } |
| 1867 | |
| 1868 | static void |
| 1869 | r5l_recovery_replay_one_stripe(struct r5conf *conf, |
| 1870 | struct stripe_head *sh, |
| 1871 | struct r5l_recovery_ctx *ctx) |
| 1872 | { |
| 1873 | struct md_rdev *rdev, *rrdev; |
| 1874 | int disk_index; |
| 1875 | int data_count = 0; |
| 1876 | |
| 1877 | for (disk_index = 0; disk_index < sh->disks; disk_index++) { |
| 1878 | if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) |
| 1879 | continue; |
| 1880 | if (disk_index == sh->qd_idx || disk_index == sh->pd_idx) |
| 1881 | continue; |
| 1882 | data_count++; |
| 1883 | } |
| 1884 | |
| 1885 | /* |
| 1886 | * stripes that only have parity must have been flushed |
| 1887 | * before the crash that we are now recovering from, so |
| 1888 | * there is nothing more to recover. |
| 1889 | */ |
| 1890 | if (data_count == 0) |
| 1891 | goto out; |
| 1892 | |
| 1893 | for (disk_index = 0; disk_index < sh->disks; disk_index++) { |
| 1894 | if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) |
| 1895 | continue; |
| 1896 | |
| 1897 | /* in case device is broken */ |
| 1898 | rcu_read_lock(); |
| 1899 | rdev = rcu_dereference(conf->disks[disk_index].rdev); |
| 1900 | if (rdev) { |
| 1901 | atomic_inc(&rdev->nr_pending); |
| 1902 | rcu_read_unlock(); |
| 1903 | sync_page_io(rdev, sh->sector, PAGE_SIZE, |
| 1904 | sh->dev[disk_index].page, REQ_OP_WRITE, 0, |
| 1905 | false); |
| 1906 | rdev_dec_pending(rdev, rdev->mddev); |
| 1907 | rcu_read_lock(); |
| 1908 | } |
| 1909 | rrdev = rcu_dereference(conf->disks[disk_index].replacement); |
| 1910 | if (rrdev) { |
| 1911 | atomic_inc(&rrdev->nr_pending); |
| 1912 | rcu_read_unlock(); |
| 1913 | sync_page_io(rrdev, sh->sector, PAGE_SIZE, |
| 1914 | sh->dev[disk_index].page, REQ_OP_WRITE, 0, |
| 1915 | false); |
| 1916 | rdev_dec_pending(rrdev, rrdev->mddev); |
| 1917 | rcu_read_lock(); |
| 1918 | } |
| 1919 | rcu_read_unlock(); |
| 1920 | } |
| 1921 | ctx->data_parity_stripes++; |
| 1922 | out: |
| 1923 | r5l_recovery_reset_stripe(sh); |
| 1924 | } |
| 1925 | |
| 1926 | static struct stripe_head * |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 1927 | r5c_recovery_alloc_stripe( |
| 1928 | struct r5conf *conf, |
| 1929 | sector_t stripe_sect, |
| 1930 | int noblock) |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1931 | { |
| 1932 | struct stripe_head *sh; |
| 1933 | |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 1934 | sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1935 | if (!sh) |
| 1936 | return NULL; /* no more stripes available */ |
| 1937 | |
| 1938 | r5l_recovery_reset_stripe(sh); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1939 | |
| 1940 | return sh; |
| 1941 | } |
| 1942 | |
| 1943 | static struct stripe_head * |
| 1944 | r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect) |
| 1945 | { |
| 1946 | struct stripe_head *sh; |
| 1947 | |
| 1948 | list_for_each_entry(sh, list, lru) |
| 1949 | if (sh->sector == sect) |
| 1950 | return sh; |
| 1951 | return NULL; |
| 1952 | } |
| 1953 | |
| 1954 | static void |
| 1955 | r5c_recovery_drop_stripes(struct list_head *cached_stripe_list, |
| 1956 | struct r5l_recovery_ctx *ctx) |
| 1957 | { |
| 1958 | struct stripe_head *sh, *next; |
| 1959 | |
| 1960 | list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { |
| 1961 | r5l_recovery_reset_stripe(sh); |
| 1962 | list_del_init(&sh->lru); |
| 1963 | raid5_release_stripe(sh); |
| 1964 | } |
| 1965 | } |
| 1966 | |
| 1967 | static void |
| 1968 | r5c_recovery_replay_stripes(struct list_head *cached_stripe_list, |
| 1969 | struct r5l_recovery_ctx *ctx) |
| 1970 | { |
| 1971 | struct stripe_head *sh, *next; |
| 1972 | |
| 1973 | list_for_each_entry_safe(sh, next, cached_stripe_list, lru) |
| 1974 | if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { |
| 1975 | r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx); |
| 1976 | list_del_init(&sh->lru); |
| 1977 | raid5_release_stripe(sh); |
| 1978 | } |
| 1979 | } |
| 1980 | |
| 1981 | /* if the checksum matches, return 0; otherwise return -EINVAL */ |
| 1982 | static int |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1983 | r5l_recovery_verify_data_checksum(struct r5l_log *log, |
| 1984 | struct r5l_recovery_ctx *ctx, |
| 1985 | struct page *page, |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1986 | sector_t log_offset, __le32 log_checksum) |
| 1987 | { |
| 1988 | void *addr; |
| 1989 | u32 checksum; |
| 1990 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1991 | r5l_recovery_read_page(log, ctx, page, log_offset); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1992 | addr = kmap_atomic(page); |
| 1993 | checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); |
| 1994 | kunmap_atomic(addr); |
| 1995 | return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL; |
| 1996 | } |
| 1997 | |
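| | /* |
| | * Note: the crc above is seeded with log->uuid_checksum, so a stale page |
| | * written by a different array fails verification even if its payload is |
| | * internally consistent. |
| | */ |
| | |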
| 1998 | /* |
| 1999 | * before loading data into the stripe cache, we need to verify the checksum for |
| 2000 | * all data; if there is a mismatch for any data page, we drop all data in the meta block |
| 2001 | */ |
| 2002 | static int |
| 2003 | r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log, |
| 2004 | struct r5l_recovery_ctx *ctx) |
| 2005 | { |
| 2006 | struct mddev *mddev = log->rdev->mddev; |
| 2007 | struct r5conf *conf = mddev->private; |
| 2008 | struct r5l_meta_block *mb = page_address(ctx->meta_page); |
| 2009 | sector_t mb_offset = sizeof(struct r5l_meta_block); |
| 2010 | sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); |
| 2011 | struct page *page; |
| 2012 | struct r5l_payload_data_parity *payload; |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2013 | struct r5l_payload_flush *payload_flush; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2014 | |
| 2015 | page = alloc_page(GFP_KERNEL); |
| 2016 | if (!page) |
| 2017 | return -ENOMEM; |
| 2018 | |
| 2019 | while (mb_offset < le32_to_cpu(mb->meta_size)) { |
| 2020 | payload = (void *)mb + mb_offset; |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2021 | payload_flush = (void *)mb + mb_offset; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2022 | |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2023 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2024 | if (r5l_recovery_verify_data_checksum( |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2025 | log, ctx, page, log_offset, |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2026 | payload->checksum[0]) < 0) |
| 2027 | goto mismatch; |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2028 | } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) { |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2029 | if (r5l_recovery_verify_data_checksum( |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2030 | log, ctx, page, log_offset, |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2031 | payload->checksum[0]) < 0) |
| 2032 | goto mismatch; |
| 2033 | if (conf->max_degraded == 2 && /* q for RAID 6 */ |
| 2034 | r5l_recovery_verify_data_checksum( |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2035 | log, ctx, page, |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2036 | r5l_ring_add(log, log_offset, |
| 2037 | BLOCK_SECTORS), |
| 2038 | payload->checksum[1]) < 0) |
| 2039 | goto mismatch; |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2040 | } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2041 | /* nothing to do for R5LOG_PAYLOAD_FLUSH here */ |
| 2042 | } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */ |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2043 | goto mismatch; |
| 2044 | |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2045 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2046 | mb_offset += sizeof(struct r5l_payload_flush) + |
| 2047 | le32_to_cpu(payload_flush->size); |
| 2048 | } else { |
| 2049 | /* DATA or PARITY payload */ |
| 2050 | log_offset = r5l_ring_add(log, log_offset, |
| 2051 | le32_to_cpu(payload->size)); |
| 2052 | mb_offset += sizeof(struct r5l_payload_data_parity) + |
| 2053 | sizeof(__le32) * |
| 2054 | (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); |
| 2055 | } |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2056 | |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2057 | } |
| 2058 | |
| 2059 | put_page(page); |
| 2060 | return 0; |
| 2061 | |
| 2062 | mismatch: |
| 2063 | put_page(page); |
| 2064 | return -EINVAL; |
| 2065 | } |
| 2066 | |
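| | /* |
| | * Meta-block walking in numbers: a one-page data payload has |
| | * payload->size = 8 sectors, so the walker advances mb_offset by the |
| | * payload header plus one __le32 checksum and log_offset by one 4K block; |
| | * a RAID6 parity payload (P and Q) has size = 16 sectors, two checksums |
| | * and two 4K blocks. |
| | */ |
| | |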
| 2067 | /* |
| 2068 | * Analyze all data/parity pages in one meta block |
| 2069 | * Returns: |
| 2070 | * 0 for success |
| 2071 | * -EINVAL for unknown payload type |
| 2072 | * -EAGAIN for checksum mismatch of data page |
| 2073 | * -ENOMEM when out of memory (alloc_page failed or ran out of stripes) |
| 2074 | */ |
| 2075 | static int |
| 2076 | r5c_recovery_analyze_meta_block(struct r5l_log *log, |
| 2077 | struct r5l_recovery_ctx *ctx, |
| 2078 | struct list_head *cached_stripe_list) |
| 2079 | { |
| 2080 | struct mddev *mddev = log->rdev->mddev; |
| 2081 | struct r5conf *conf = mddev->private; |
| 2082 | struct r5l_meta_block *mb; |
| 2083 | struct r5l_payload_data_parity *payload; |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2084 | struct r5l_payload_flush *payload_flush; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2085 | int mb_offset; |
| 2086 | sector_t log_offset; |
| 2087 | sector_t stripe_sect; |
| 2088 | struct stripe_head *sh; |
| 2089 | int ret; |
| 2090 | |
| 2091 | /* |
| 2092 | * for a mismatch in data blocks, we will drop all data in this mb, but |
| 2093 | * we will still read the next mb for other data with the FLUSH flag, as |
| 2094 | * io_units could finish out of order. |
| 2095 | */ |
| 2096 | ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx); |
| 2097 | if (ret == -EINVAL) |
| 2098 | return -EAGAIN; |
| 2099 | else if (ret) |
| 2100 | return ret; /* -ENOMEM due to alloc_page() failure */ |
| 2101 | |
| 2102 | mb = page_address(ctx->meta_page); |
| 2103 | mb_offset = sizeof(struct r5l_meta_block); |
| 2104 | log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); |
| 2105 | |
| 2106 | while (mb_offset < le32_to_cpu(mb->meta_size)) { |
| 2107 | int dd; |
| 2108 | |
| 2109 | payload = (void *)mb + mb_offset; |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2110 | payload_flush = (void *)mb + mb_offset; |
| 2111 | |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2112 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { |
Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2113 | int i, count; |
| 2114 | |
| 2115 | count = le32_to_cpu(payload_flush->size) / sizeof(__le64); |
| 2116 | for (i = 0; i < count; ++i) { |
| 2117 | stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]); |
| 2118 | sh = r5c_recovery_lookup_stripe(cached_stripe_list, |
| 2119 | stripe_sect); |
| 2120 | if (sh) { |
| 2121 | WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); |
| 2122 | r5l_recovery_reset_stripe(sh); |
| 2123 | list_del_init(&sh->lru); |
| 2124 | raid5_release_stripe(sh); |
| 2125 | } |
| 2126 | } |
| 2127 | |
| 2128 | mb_offset += sizeof(struct r5l_payload_flush) + |
| 2129 | le32_to_cpu(payload_flush->size); |
| 2130 | continue; |
| 2131 | } |
| 2132 | |
| 2133 | /* DATA or PARITY payload */ |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2134 | stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ? |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2135 | raid5_compute_sector( |
| 2136 | conf, le64_to_cpu(payload->location), 0, &dd, |
| 2137 | NULL) |
| 2138 | : le64_to_cpu(payload->location); |
| 2139 | |
| 2140 | sh = r5c_recovery_lookup_stripe(cached_stripe_list, |
| 2141 | stripe_sect); |
| 2142 | |
| 2143 | if (!sh) { |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 2144 | sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2145 | /* |
| 2146 | 			 * cannot get a stripe from raid5_get_active_stripe(); |
| 2147 | 			 * replay some cached stripes to free them, then retry |
| 2148 | */ |
| 2149 | if (!sh) { |
| 2150 | r5c_recovery_replay_stripes( |
| 2151 | cached_stripe_list, ctx); |
| 2152 | sh = r5c_recovery_alloc_stripe( |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 2153 | conf, stripe_sect, 1); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2154 | } |
| 2155 | if (!sh) { |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 2156 | int new_size = conf->min_nr_stripes * 2; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2157 | 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n", |
| 2158 | mdname(mddev), |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 2159 | new_size); |
| 2160 | ret = raid5_set_cache_size(mddev, new_size); |
| 2161 | if (conf->min_nr_stripes <= new_size / 2) { |
| 2162 | pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n", |
| 2163 | mdname(mddev), |
| 2164 | ret, |
| 2165 | new_size, |
| 2166 | conf->min_nr_stripes, |
| 2167 | conf->max_nr_stripes); |
| 2168 | return -ENOMEM; |
| 2169 | } |
| 2170 | sh = r5c_recovery_alloc_stripe( |
| 2171 | conf, stripe_sect, 0); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2172 | } |
| 2173 | if (!sh) { |
| 2174 | pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", |
Alexei Naberezhnov | 483cbbe | 2018-03-27 16:54:16 -0700 | [diff] [blame] | 2175 | mdname(mddev)); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2176 | return -ENOMEM; |
| 2177 | } |
| 2178 | list_add_tail(&sh->lru, cached_stripe_list); |
| 2179 | } |
| 2180 | |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2181 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { |
Zhengyuan Liu | f7b7bee | 2016-11-26 10:57:13 +0800 | [diff] [blame] | 2182 | if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && |
| 2183 | test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2184 | r5l_recovery_replay_one_stripe(conf, sh, ctx); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2185 | list_move_tail(&sh->lru, cached_stripe_list); |
| 2186 | } |
| 2187 | r5l_recovery_load_data(log, sh, ctx, payload, |
| 2188 | log_offset); |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2189 | } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2190 | r5l_recovery_load_parity(log, sh, ctx, payload, |
| 2191 | log_offset); |
| 2192 | else |
| 2193 | return -EINVAL; |
| 2194 | |
| 2195 | log_offset = r5l_ring_add(log, log_offset, |
| 2196 | le32_to_cpu(payload->size)); |
| 2197 | |
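| | 		/* |
| | 		 * payload->size is in sectors; size >> (PAGE_SHIFT - 9) is the |
| | 		 * number of 4K pages, each of which contributes one __le32 |
| | 		 * checksum after the payload header. E.g. one 4K data page has |
| | 		 * size == 8 sectors and exactly one checksum word. |
| | 		 */ |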
| 2198 | mb_offset += sizeof(struct r5l_payload_data_parity) + |
| 2199 | sizeof(__le32) * |
| 2200 | (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); |
| 2201 | } |
| 2202 | |
| 2203 | return 0; |
| 2204 | } |
| 2205 | |
| 2206 | /* |
| 2207 | * Load the stripe into cache. The stripe will be written out later by |
| 2208 | * the stripe cache state machine. |
| 2209 | */ |
| 2210 | static void r5c_recovery_load_one_stripe(struct r5l_log *log, |
| 2211 | struct stripe_head *sh) |
| 2212 | { |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2213 | struct r5dev *dev; |
| 2214 | int i; |
| 2215 | |
| 2216 | for (i = sh->disks; i--; ) { |
| 2217 | dev = sh->dev + i; |
| 2218 | if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) { |
| 2219 | set_bit(R5_InJournal, &dev->flags); |
| 2220 | set_bit(R5_UPTODATE, &dev->flags); |
| 2221 | } |
| 2222 | } |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2223 | } |
| 2224 | |
| 2225 | /* |
| 2226 | * Scan through the log for all to-be-flushed data |
| 2227 | * |
| 2228 | * For stripes with data and parity, namely Data-Parity stripe |
| 2229 | * (STRIPE_R5C_CACHING == 0), we simply replay all the writes. |
| 2230 | * |
| 2231 | * For stripes with only data, namely Data-Only stripe |
| 2232 | * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine. |
| 2233 | * |
| 2234 |  * For a stripe, if we see data after parity, we should discard all previous |
| 2235 |  * data and parity for this stripe, as that data has already been flushed to |
| 2236 |  * the array. |
| 2237 |  * |
| 2238 |  * At the end of the scan, we return the new journal_tail, which points to |
| 2239 |  * the first data-only stripe on the journal device, or to the next invalid meta block. |
| 2240 | */ |
| 2241 | static int r5c_recovery_flush_log(struct r5l_log *log, |
| 2242 | struct r5l_recovery_ctx *ctx) |
| 2243 | { |
JackieLiu | bc8f167 | 2016-11-28 16:19:20 +0800 | [diff] [blame] | 2244 | struct stripe_head *sh; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2245 | int ret = 0; |
| 2246 | |
| 2247 | /* scan through the log */ |
| 2248 | while (1) { |
| 2249 | if (r5l_recovery_read_meta_block(log, ctx)) |
| 2250 | break; |
| 2251 | |
| 2252 | ret = r5c_recovery_analyze_meta_block(log, ctx, |
| 2253 | &ctx->cached_list); |
| 2254 | /* |
| 2255 | 		 * -EAGAIN means a mismatch in a data block; in this case we still |
| 2256 | 		 * try to scan the next meta block |
| 2257 | */ |
| 2258 | if (ret && ret != -EAGAIN) |
| 2259 | break; /* ret == -EINVAL or -ENOMEM */ |
| 2260 | ctx->seq++; |
| 2261 | ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); |
| 2262 | } |
| 2263 | |
| 2264 | if (ret == -ENOMEM) { |
| 2265 | r5c_recovery_drop_stripes(&ctx->cached_list, ctx); |
| 2266 | return ret; |
| 2267 | } |
| 2268 | |
| 2269 | /* replay data-parity stripes */ |
| 2270 | r5c_recovery_replay_stripes(&ctx->cached_list, ctx); |
| 2271 | |
| 2272 | /* load data-only stripes to stripe cache */ |
JackieLiu | bc8f167 | 2016-11-28 16:19:20 +0800 | [diff] [blame] | 2273 | list_for_each_entry(sh, &ctx->cached_list, lru) { |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2274 | WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); |
| 2275 | r5c_recovery_load_one_stripe(log, sh); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2276 | ctx->data_only_stripes++; |
| 2277 | } |
| 2278 | |
| 2279 | return 0; |
| 2280 | } |
| 2281 | |
| 2282 | /* |
| 2283 |  * We did a recovery. Now ctx.pos points to an invalid meta block. The new |
| 2284 |  * log will start here, but we can't let the superblock point to the last |
| 2285 |  * valid meta block. The log might look like: |
| 2286 |  * | meta 1| meta 2| meta 3| |
| 2287 |  * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the |
| 2288 |  * superblock points to meta 1, we write a new valid meta 2n. If a crash |
| 2289 |  * happens again, the new recovery will start from meta 1. Since meta 2n is |
| 2290 |  * valid now, recovery will think meta 3 is valid, which is wrong. |
| 2291 |  * The solution is to create a new meta at meta 2's position with its seq == |
Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2292 |  * meta 1's seq + 10000 and let the superblock point to meta 2. That same |
| 2293 |  * recovery will not take meta 3 as valid, since its seq doesn't match meta 2's. |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2294 |  */ |
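| | /* |
| |  * For example: if meta 1 has seq 100, the expected seq at meta 2's |
| |  * position was 101, so the new meta written there gets seq 10101. |
| |  * A later recovery then expects 10102 next; meta 3's stale seq |
| |  * (e.g. 102) no longer matches, so the log correctly ends there. |
| |  */ |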
| 2295 | |
| 2296 | /* |
| 2297 | * Before recovery, the log looks like the following |
| 2298 | * |
| 2299 | * --------------------------------------------- |
| 2300 | * | valid log | invalid log | |
| 2301 | * --------------------------------------------- |
| 2302 | * ^ |
| 2303 | * |- log->last_checkpoint |
| 2304 | * |- log->last_cp_seq |
| 2305 | * |
| 2306 |  * Now we scan through the log until we see an invalid entry |
| 2307 | * |
| 2308 | * --------------------------------------------- |
| 2309 | * | valid log | invalid log | |
| 2310 | * --------------------------------------------- |
| 2311 | * ^ ^ |
| 2312 | * |- log->last_checkpoint |- ctx->pos |
| 2313 | * |- log->last_cp_seq |- ctx->seq |
| 2314 | * |
| 2315 |  * From this point, we need to increase the seq number by 10000 to avoid |
| 2316 |  * confusing the next recovery. |
| 2317 | * |
| 2318 | * --------------------------------------------- |
| 2319 | * | valid log | invalid log | |
| 2320 | * --------------------------------------------- |
| 2321 | * ^ ^ |
| 2322 | * |- log->last_checkpoint |- ctx->pos+1 |
Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2323 | * |- log->last_cp_seq |- ctx->seq+10001 |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2324 | * |
| 2325 |  * However, it is not safe to start the state machine yet, because the |
| 2326 |  * data-only stripes are not yet secured in the RAID. To save these data-only |
| 2327 |  * stripes, we rewrite them starting from seq+10001. |
| 2328 | * |
| 2329 | * ----------------------------------------------------------------- |
| 2330 | * | valid log | data only stripes | invalid log | |
| 2331 | * ----------------------------------------------------------------- |
| 2332 | * ^ ^ |
| 2333 | * |- log->last_checkpoint |- ctx->pos+n |
Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2334 | * |- log->last_cp_seq |- ctx->seq+10000+n |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2335 | * |
| 2336 |  * If failure happens again during this process, the recovery can safely start |
| 2337 |  * again from log->last_checkpoint. |
| 2338 | * |
| 2339 | * Once data only stripes are rewritten to journal, we move log_tail |
| 2340 | * |
| 2341 | * ----------------------------------------------------------------- |
| 2342 | * | old log | data only stripes | invalid log | |
| 2343 | * ----------------------------------------------------------------- |
| 2344 | * ^ ^ |
| 2345 | * |- log->last_checkpoint |- ctx->pos+n |
Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2346 | * |- log->last_cp_seq |- ctx->seq+10000+n |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2347 | * |
| 2348 | * Then we can safely start the state machine. If failure happens from this |
| 2349 |  * point on, the recovery will start from the new log->last_checkpoint. |
| 2350 | */ |
| 2351 | static int |
| 2352 | r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, |
| 2353 | struct r5l_recovery_ctx *ctx) |
| 2354 | { |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2355 | struct stripe_head *sh; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2356 | struct mddev *mddev = log->rdev->mddev; |
| 2357 | struct page *page; |
Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2358 | sector_t next_checkpoint = MaxSector; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2359 | |
| 2360 | page = alloc_page(GFP_KERNEL); |
| 2361 | if (!page) { |
| 2362 | pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n", |
| 2363 | mdname(mddev)); |
| 2364 | return -ENOMEM; |
| 2365 | } |
| 2366 | |
Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2367 | WARN_ON(list_empty(&ctx->cached_list)); |
| 2368 | |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2369 | list_for_each_entry(sh, &ctx->cached_list, lru) { |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2370 | struct r5l_meta_block *mb; |
| 2371 | int i; |
| 2372 | int offset; |
| 2373 | sector_t write_pos; |
| 2374 | |
| 2375 | WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); |
| 2376 | r5l_recovery_create_empty_meta_block(log, page, |
| 2377 | ctx->pos, ctx->seq); |
| 2378 | mb = page_address(page); |
| 2379 | offset = le32_to_cpu(mb->meta_size); |
JackieLiu | fc833c2 | 2016-11-28 16:19:19 +0800 | [diff] [blame] | 2380 | write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2381 | |
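| | 		/* |
| | 		 * The new meta block for this stripe lives at ctx->pos; each |
| | 		 * R5_InJournal page is rewritten right after it, with |
| | 		 * write_pos advancing one 4K block per data page. |
| | 		 */ |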
| 2382 | for (i = sh->disks; i--; ) { |
| 2383 | struct r5dev *dev = &sh->dev[i]; |
| 2384 | struct r5l_payload_data_parity *payload; |
| 2385 | void *addr; |
| 2386 | |
| 2387 | if (test_bit(R5_InJournal, &dev->flags)) { |
| 2388 | payload = (void *)mb + offset; |
| 2389 | payload->header.type = cpu_to_le16( |
| 2390 | R5LOG_PAYLOAD_DATA); |
Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2391 | payload->size = cpu_to_le32(BLOCK_SECTORS); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2392 | payload->location = cpu_to_le64( |
| 2393 | raid5_compute_blocknr(sh, i, 0)); |
| 2394 | addr = kmap_atomic(dev->page); |
| 2395 | payload->checksum[0] = cpu_to_le32( |
| 2396 | crc32c_le(log->uuid_checksum, addr, |
| 2397 | PAGE_SIZE)); |
| 2398 | kunmap_atomic(addr); |
| 2399 | sync_page_io(log->rdev, write_pos, PAGE_SIZE, |
| 2400 | dev->page, REQ_OP_WRITE, 0, false); |
| 2401 | write_pos = r5l_ring_add(log, write_pos, |
| 2402 | BLOCK_SECTORS); |
| 2403 | offset += sizeof(__le32) + |
| 2404 | sizeof(struct r5l_payload_data_parity); |
| 2405 | |
| 2406 | } |
| 2407 | } |
| 2408 | mb->meta_size = cpu_to_le32(offset); |
Song Liu | 5c88f40 | 2016-12-07 09:42:05 -0800 | [diff] [blame] | 2409 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, |
| 2410 | mb, PAGE_SIZE)); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2411 | sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, |
Jan Kara | 5a8948f | 2017-05-31 09:44:33 +0200 | [diff] [blame] | 2412 | REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2413 | sh->log_start = ctx->pos; |
Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2414 | list_add_tail(&sh->r5c, &log->stripe_in_journal_list); |
| 2415 | atomic_inc(&log->stripe_in_journal_count); |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2416 | ctx->pos = write_pos; |
| 2417 | ctx->seq += 1; |
Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2418 | next_checkpoint = sh->log_start; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2419 | } |
Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2420 | log->next_checkpoint = next_checkpoint; |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2421 | __free_page(page); |
| 2422 | return 0; |
| 2423 | } |
| 2424 | |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2425 | static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, |
| 2426 | struct r5l_recovery_ctx *ctx) |
| 2427 | { |
| 2428 | struct mddev *mddev = log->rdev->mddev; |
| 2429 | struct r5conf *conf = mddev->private; |
| 2430 | struct stripe_head *sh, *next; |
Song Liu | c9020e6 | 2020-07-06 14:57:32 -0700 | [diff] [blame] | 2431 | bool cleared_pending = false; |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2432 | |
| 2433 | if (ctx->data_only_stripes == 0) |
| 2434 | return; |
| 2435 | |
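| | 	/* |
| | 	 * raid5 does not handle stripes while MD_SB_CHANGE_PENDING is set, |
| | 	 * so the flush below could wait on active_stripes forever; clear |
| | 	 * the bit for the duration of the flush and restore it afterwards. |
| | 	 */ |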
Song Liu | c9020e6 | 2020-07-06 14:57:32 -0700 | [diff] [blame] | 2436 | if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { |
| 2437 | cleared_pending = true; |
| 2438 | clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); |
| 2439 | } |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2440 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; |
| 2441 | |
| 2442 | list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { |
| 2443 | r5c_make_stripe_write_out(sh); |
| 2444 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2445 | list_del_init(&sh->lru); |
| 2446 | raid5_release_stripe(sh); |
| 2447 | } |
| 2448 | |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2449 | /* reuse conf->wait_for_quiescent in recovery */ |
| 2450 | wait_event(conf->wait_for_quiescent, |
| 2451 | atomic_read(&conf->active_stripes) == 0); |
| 2452 | |
| 2453 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; |
Song Liu | c9020e6 | 2020-07-06 14:57:32 -0700 | [diff] [blame] | 2454 | if (cleared_pending) |
| 2455 | set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2456 | } |
| 2457 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2458 | static int r5l_recovery_log(struct r5l_log *log) |
| 2459 | { |
Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2460 | struct mddev *mddev = log->rdev->mddev; |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2461 | struct r5l_recovery_ctx *ctx; |
Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2462 | int ret; |
JackieLiu | 43b9674 | 2016-12-05 11:58:53 +0800 | [diff] [blame] | 2463 | sector_t pos; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2464 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2465 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 2466 | if (!ctx) |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2467 | return -ENOMEM; |
| 2468 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2469 | ctx->pos = log->last_checkpoint; |
| 2470 | ctx->seq = log->last_cp_seq; |
| 2471 | INIT_LIST_HEAD(&ctx->cached_list); |
| 2472 | ctx->meta_page = alloc_page(GFP_KERNEL); |
| 2473 | |
| 2474 | if (!ctx->meta_page) { |
| 2475 | ret = -ENOMEM; |
| 2476 | goto meta_page; |
| 2477 | } |
| 2478 | |
| 2479 | if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) { |
| 2480 | ret = -ENOMEM; |
| 2481 | goto ra_pool; |
| 2482 | } |
| 2483 | |
| 2484 | ret = r5c_recovery_flush_log(log, ctx); |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2485 | |
Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2486 | if (ret) |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2487 | goto error; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2488 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2489 | pos = ctx->pos; |
| 2490 | ctx->seq += 10000; |
JackieLiu | 43b9674 | 2016-12-05 11:58:53 +0800 | [diff] [blame] | 2491 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2492 | if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0)) |
Song Liu | 92e6245 | 2017-12-19 11:43:07 -0800 | [diff] [blame] | 2493 | pr_info("md/raid:%s: starting from clean shutdown\n", |
Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2494 | mdname(mddev)); |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2495 | else |
Song Liu | 92e6245 | 2017-12-19 11:43:07 -0800 | [diff] [blame] | 2496 | pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2497 | mdname(mddev), ctx->data_only_stripes, |
| 2498 | ctx->data_parity_stripes); |
Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2499 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2500 | if (ctx->data_only_stripes == 0) { |
| 2501 | log->next_checkpoint = ctx->pos; |
| 2502 | r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++); |
| 2503 | ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); |
| 2504 | } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) { |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2505 | pr_err("md/raid:%s: failed to rewrite stripes to journal\n", |
| 2506 | mdname(mddev)); |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2507 | ret = -EIO; |
| 2508 | goto error; |
Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2509 | } |
Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2510 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2511 | log->log_start = ctx->pos; |
| 2512 | log->seq = ctx->seq; |
JackieLiu | 43b9674 | 2016-12-05 11:58:53 +0800 | [diff] [blame] | 2513 | log->last_checkpoint = pos; |
| 2514 | r5l_write_super(log, pos); |
Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2515 | |
Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2516 | r5c_recovery_flush_data_only_stripes(log, ctx); |
| 2517 | ret = 0; |
| 2518 | error: |
| 2519 | r5l_recovery_free_ra_pool(log, ctx); |
| 2520 | ra_pool: |
| 2521 | __free_page(ctx->meta_page); |
| 2522 | meta_page: |
| 2523 | kfree(ctx); |
| 2524 | return ret; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2525 | } |
| 2526 | |
| 2527 | static void r5l_write_super(struct r5l_log *log, sector_t cp) |
| 2528 | { |
| 2529 | struct mddev *mddev = log->rdev->mddev; |
| 2530 | |
| 2531 | log->rdev->journal_tail = cp; |
Shaohua Li | 2953079 | 2016-12-08 15:48:19 -0800 | [diff] [blame] | 2532 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2533 | } |
| 2534 | |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2535 | static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) |
| 2536 | { |
Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2537 | struct r5conf *conf; |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2538 | int ret; |
| 2539 | |
Guoqing Jiang | 01b5d32 | 2020-07-28 12:01:42 +0200 | [diff] [blame] | 2540 | spin_lock(&mddev->lock); |
Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2541 | conf = mddev->private; |
| 2542 | if (!conf || !conf->log) { |
Guoqing Jiang | 01b5d32 | 2020-07-28 12:01:42 +0200 | [diff] [blame] | 2543 | spin_unlock(&mddev->lock); |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2544 | return 0; |
Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2545 | } |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2546 | |
| 2547 | switch (conf->log->r5c_journal_mode) { |
| 2548 | case R5C_JOURNAL_MODE_WRITE_THROUGH: |
| 2549 | ret = snprintf( |
| 2550 | page, PAGE_SIZE, "[%s] %s\n", |
| 2551 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], |
| 2552 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); |
| 2553 | break; |
| 2554 | case R5C_JOURNAL_MODE_WRITE_BACK: |
| 2555 | ret = snprintf( |
| 2556 | page, PAGE_SIZE, "%s [%s]\n", |
| 2557 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], |
| 2558 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); |
| 2559 | break; |
| 2560 | default: |
| 2561 | ret = 0; |
| 2562 | } |
Guoqing Jiang | 01b5d32 | 2020-07-28 12:01:42 +0200 | [diff] [blame] | 2563 | spin_unlock(&mddev->lock); |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2564 | return ret; |
| 2565 | } |
| 2566 | |
Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2567 | /* |
| 2568 | * Set journal cache mode on @mddev (external API initially needed by dm-raid). |
| 2569 | * |
| 2570 | * @mode as defined in 'enum r5c_journal_mode'. |
| 2572 |  */ |
| 2573 | int r5c_journal_mode_set(struct mddev *mddev, int mode) |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2574 | { |
Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2575 | struct r5conf *conf; |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2576 | |
Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2577 | if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || |
| 2578 | mode > R5C_JOURNAL_MODE_WRITE_BACK) |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2579 | return -EINVAL; |
| 2580 | |
Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2581 | conf = mddev->private; |
Song Liu | ff35f58 | 2017-11-19 22:17:00 -0800 | [diff] [blame] | 2582 | if (!conf || !conf->log) |
Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2583 | return -ENODEV; |
Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2584 | |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2585 | if (raid5_calc_degraded(conf) > 0 && |
Song Liu | ff35f58 | 2017-11-19 22:17:00 -0800 | [diff] [blame] | 2586 | mode == R5C_JOURNAL_MODE_WRITE_BACK) |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2587 | return -EINVAL; |
| 2588 | |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2589 | mddev_suspend(mddev); |
Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2590 | conf->log->r5c_journal_mode = mode; |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2591 | mddev_resume(mddev); |
| 2592 | |
| 2593 | pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", |
Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2594 | mdname(mddev), mode, r5c_journal_mode_str[mode]); |
| 2595 | return 0; |
| 2596 | } |
| 2597 | EXPORT_SYMBOL(r5c_journal_mode_set); |
| 2598 | |
| 2599 | static ssize_t r5c_journal_mode_store(struct mddev *mddev, |
| 2600 | const char *page, size_t length) |
| 2601 | { |
| 2602 | int mode = ARRAY_SIZE(r5c_journal_mode_str); |
| 2603 | size_t len = length; |
Song Liu | ff35f58 | 2017-11-19 22:17:00 -0800 | [diff] [blame] | 2604 | int ret; |
Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2605 | |
| 2606 | if (len < 2) |
| 2607 | return -EINVAL; |
| 2608 | |
| 2609 | if (page[len - 1] == '\n') |
| 2610 | len--; |
| 2611 | |
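| | 	/* |
| | 	 * Scan the mode table backwards for a name matching the written |
| | 	 * string; if nothing matches, mode ends up at -1 and |
| | 	 * r5c_journal_mode_set() rejects it with -EINVAL. |
| | 	 */ |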
| 2612 | while (mode--) |
| 2613 | if (strlen(r5c_journal_mode_str[mode]) == len && |
| 2614 | !strncmp(page, r5c_journal_mode_str[mode], len)) |
| 2615 | break; |
Song Liu | ff35f58 | 2017-11-19 22:17:00 -0800 | [diff] [blame] | 2616 | ret = mddev_lock(mddev); |
| 2617 | if (ret) |
| 2618 | return ret; |
| 2619 | ret = r5c_journal_mode_set(mddev, mode); |
| 2620 | mddev_unlock(mddev); |
| 2621 | return ret ?: length; |
Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2622 | } |
| 2623 | |
| 2624 | struct md_sysfs_entry |
| 2625 | r5c_journal_mode = __ATTR(journal_mode, 0644, |
| 2626 | r5c_journal_mode_show, r5c_journal_mode_store); |
| 2627 | |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2628 | /* |
| 2629 |  * Try to handle a write operation in the caching phase. This function should |
| 2630 |  * only be called in write-back mode. |
| 2631 |  * |
| 2632 |  * If all outstanding writes can be handled in the caching phase, returns 0. |
| 2633 |  * If the writes require the write-out phase, calls r5c_make_stripe_write_out() |
| 2634 |  * and returns -EAGAIN. |
| 2635 | */ |
| 2636 | int r5c_try_caching_write(struct r5conf *conf, |
| 2637 | struct stripe_head *sh, |
| 2638 | struct stripe_head_state *s, |
| 2639 | int disks) |
| 2640 | { |
| 2641 | struct r5l_log *log = conf->log; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2642 | int i; |
| 2643 | struct r5dev *dev; |
| 2644 | int to_cache = 0; |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2645 | void **pslot; |
| 2646 | sector_t tree_index; |
| 2647 | int ret; |
| 2648 | uintptr_t refcount; |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2649 | |
| 2650 | BUG_ON(!r5c_is_writeback(log)); |
| 2651 | |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2652 | if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { |
| 2653 | /* |
| 2654 | * There are two different scenarios here: |
| 2655 | * 1. The stripe has some data cached, and it is sent to |
| 2656 | * write-out phase for reclaim |
| 2657 | * 2. The stripe is clean, and this is the first write |
| 2658 | * |
| 2659 | * For 1, return -EAGAIN, so we continue with |
| 2660 | * handle_stripe_dirtying(). |
| 2661 | * |
| 2662 | * For 2, set STRIPE_R5C_CACHING and continue with caching |
| 2663 | * write. |
| 2664 | */ |
| 2665 | |
| 2666 | 		/* case 1: anything in s->injournal or anything in s->written */ |
| 2667 | if (s->injournal > 0 || s->written > 0) |
| 2668 | return -EAGAIN; |
| 2669 | /* case 2 */ |
| 2670 | set_bit(STRIPE_R5C_CACHING, &sh->state); |
| 2671 | } |
| 2672 | |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2673 | /* |
| 2674 | 	 * When run in degraded mode, the array is set to write-through mode. |
| 2675 | 	 * This check helps drain pending writes safely in the transition to |
| 2676 | 	 * write-through mode. |
Song Liu | 5ddf044 | 2017-05-11 17:03:44 -0700 | [diff] [blame] | 2677 | 	 * |
| 2678 | 	 * When a stripe is syncing, the write is also handled in |
| 2679 | 	 * write-through mode. |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2680 | */ |
Song Liu | 5ddf044 | 2017-05-11 17:03:44 -0700 | [diff] [blame] | 2681 | if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2682 | r5c_make_stripe_write_out(sh); |
| 2683 | return -EAGAIN; |
| 2684 | } |
| 2685 | |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2686 | for (i = disks; i--; ) { |
| 2687 | dev = &sh->dev[i]; |
| 2688 | /* if non-overwrite, use writing-out phase */ |
| 2689 | if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) && |
| 2690 | !test_bit(R5_InJournal, &dev->flags)) { |
| 2691 | r5c_make_stripe_write_out(sh); |
| 2692 | return -EAGAIN; |
| 2693 | } |
| 2694 | } |
| 2695 | |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2696 | /* if the stripe is not counted in big_stripe_tree, add it now */ |
| 2697 | if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && |
| 2698 | !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { |
| 2699 | tree_index = r5c_tree_index(conf, sh->sector); |
| 2700 | spin_lock(&log->tree_lock); |
| 2701 | pslot = radix_tree_lookup_slot(&log->big_stripe_tree, |
| 2702 | tree_index); |
| 2703 | if (pslot) { |
| 2704 | refcount = (uintptr_t)radix_tree_deref_slot_protected( |
| 2705 | pslot, &log->tree_lock) >> |
| 2706 | R5C_RADIX_COUNT_SHIFT; |
| 2707 | radix_tree_replace_slot( |
| 2708 | &log->big_stripe_tree, pslot, |
| 2709 | (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT)); |
| 2710 | } else { |
| 2711 | /* |
| 2712 | * this radix_tree_insert can fail safely, so no |
| 2713 | * need to call radix_tree_preload() |
| 2714 | */ |
| 2715 | ret = radix_tree_insert( |
| 2716 | &log->big_stripe_tree, tree_index, |
| 2717 | (void *)(1 << R5C_RADIX_COUNT_SHIFT)); |
| 2718 | if (ret) { |
| 2719 | spin_unlock(&log->tree_lock); |
| 2720 | r5c_make_stripe_write_out(sh); |
| 2721 | return -EAGAIN; |
| 2722 | } |
| 2723 | } |
| 2724 | spin_unlock(&log->tree_lock); |
| 2725 | |
| 2726 | /* |
| 2727 | 		 * set STRIPE_R5C_PARTIAL_STRIPE to record that the stripe is |
| 2728 | 		 * counted in the radix tree |
| 2729 | */ |
| 2730 | set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); |
| 2731 | atomic_inc(&conf->r5c_cached_partial_stripes); |
| 2732 | } |
| 2733 | |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2734 | for (i = disks; i--; ) { |
| 2735 | dev = &sh->dev[i]; |
| 2736 | if (dev->towrite) { |
| 2737 | set_bit(R5_Wantwrite, &dev->flags); |
| 2738 | set_bit(R5_Wantdrain, &dev->flags); |
| 2739 | set_bit(R5_LOCKED, &dev->flags); |
| 2740 | to_cache++; |
| 2741 | } |
| 2742 | } |
| 2743 | |
| 2744 | if (to_cache) { |
| 2745 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
| 2746 | /* |
| 2747 | * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data() |
| 2748 | * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in |
| 2749 | * r5c_handle_data_cached() |
| 2750 | */ |
| 2751 | set_bit(STRIPE_LOG_TRAPPED, &sh->state); |
| 2752 | } |
| 2753 | |
| 2754 | return 0; |
| 2755 | } |
| 2756 | |
| 2757 | /* |
| 2758 | * free extra pages (orig_page) we allocated for prexor |
| 2759 | */ |
| 2760 | void r5c_release_extra_page(struct stripe_head *sh) |
| 2761 | { |
Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2762 | struct r5conf *conf = sh->raid_conf; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2763 | int i; |
Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2764 | bool using_disk_info_extra_page; |
| 2765 | |
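| | 	/* |
| | 	 * If dev[0] borrowed the shared conf->disks[].extra_page, all |
| | 	 * devices did; those pages belong to the conf and must only be |
| | 	 * detached here, never freed. |
| | 	 */ |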
| 2766 | using_disk_info_extra_page = |
| 2767 | sh->dev[0].orig_page == conf->disks[0].extra_page; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2768 | |
| 2769 | for (i = sh->disks; i--; ) |
| 2770 | if (sh->dev[i].page != sh->dev[i].orig_page) { |
| 2771 | struct page *p = sh->dev[i].orig_page; |
| 2772 | |
| 2773 | sh->dev[i].orig_page = sh->dev[i].page; |
Song Liu | 86aa139 | 2017-01-12 17:22:41 -0800 | [diff] [blame] | 2774 | clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); |
| 2775 | |
Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2776 | if (!using_disk_info_extra_page) |
| 2777 | put_page(p); |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2778 | } |
Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2779 | |
| 2780 | if (using_disk_info_extra_page) { |
| 2781 | clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state); |
| 2782 | md_wakeup_thread(conf->mddev->thread); |
| 2783 | } |
| 2784 | } |
| 2785 | |
| 2786 | void r5c_use_extra_page(struct stripe_head *sh) |
| 2787 | { |
| 2788 | struct r5conf *conf = sh->raid_conf; |
| 2789 | int i; |
| 2790 | struct r5dev *dev; |
| 2791 | |
| 2792 | for (i = sh->disks; i--; ) { |
| 2793 | dev = &sh->dev[i]; |
| 2794 | if (dev->orig_page != dev->page) |
| 2795 | put_page(dev->orig_page); |
| 2796 | dev->orig_page = conf->disks[i].extra_page; |
| 2797 | } |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2798 | } |
| 2799 | |
| 2800 | /* |
| 2801 | * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the |
| 2802 | * stripe is committed to RAID disks. |
| 2803 | */ |
| 2804 | void r5c_finish_stripe_write_out(struct r5conf *conf, |
| 2805 | struct stripe_head *sh, |
| 2806 | struct stripe_head_state *s) |
| 2807 | { |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2808 | struct r5l_log *log = conf->log; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2809 | int i; |
| 2810 | int do_wakeup = 0; |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2811 | sector_t tree_index; |
| 2812 | void **pslot; |
| 2813 | uintptr_t refcount; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2814 | |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2815 | if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)) |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2816 | return; |
| 2817 | |
| 2818 | WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); |
| 2819 | clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); |
| 2820 | |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2821 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2822 | return; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2823 | |
| 2824 | for (i = sh->disks; i--; ) { |
| 2825 | clear_bit(R5_InJournal, &sh->dev[i].flags); |
| 2826 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2827 | do_wakeup = 1; |
| 2828 | } |
| 2829 | |
| 2830 | /* |
| 2831 | 	 * analyse_stripe() ran before r5c_finish_stripe_write_out() and counted |
| 2832 | 	 * the R5_InJournal bits we just cleared, so update s->injournal too. |
| 2833 | */ |
| 2834 | s->injournal = 0; |
| 2835 | |
| 2836 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 2837 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 2838 | md_wakeup_thread(conf->mddev->thread); |
| 2839 | |
| 2840 | if (do_wakeup) |
| 2841 | wake_up(&conf->wait_for_overlap); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2842 | |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2843 | spin_lock_irq(&log->stripe_in_journal_lock); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2844 | list_del_init(&sh->r5c); |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2845 | spin_unlock_irq(&log->stripe_in_journal_lock); |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2846 | sh->log_start = MaxSector; |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2847 | |
| 2848 | atomic_dec(&log->stripe_in_journal_count); |
| 2849 | r5c_update_log_state(log); |
| 2850 | |
| 2851 | /* stop counting this stripe in big_stripe_tree */ |
| 2852 | if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) || |
| 2853 | test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { |
| 2854 | tree_index = r5c_tree_index(conf, sh->sector); |
| 2855 | spin_lock(&log->tree_lock); |
| 2856 | pslot = radix_tree_lookup_slot(&log->big_stripe_tree, |
| 2857 | tree_index); |
| 2858 | BUG_ON(pslot == NULL); |
| 2859 | refcount = (uintptr_t)radix_tree_deref_slot_protected( |
| 2860 | pslot, &log->tree_lock) >> |
| 2861 | R5C_RADIX_COUNT_SHIFT; |
| 2862 | if (refcount == 1) |
| 2863 | radix_tree_delete(&log->big_stripe_tree, tree_index); |
| 2864 | else |
| 2865 | radix_tree_replace_slot( |
| 2866 | &log->big_stripe_tree, pslot, |
| 2867 | (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT)); |
| 2868 | spin_unlock(&log->tree_lock); |
| 2869 | } |
| 2870 | |
| 2871 | if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) { |
| 2872 | BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0); |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 2873 | atomic_dec(&conf->r5c_flushing_partial_stripes); |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2874 | atomic_dec(&conf->r5c_cached_partial_stripes); |
| 2875 | } |
| 2876 | |
| 2877 | if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { |
| 2878 | BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0); |
Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 2879 | atomic_dec(&conf->r5c_flushing_full_stripes); |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2880 | atomic_dec(&conf->r5c_cached_full_stripes); |
| 2881 | } |
Song Liu | ea17481 | 2017-03-09 21:23:39 -0800 | [diff] [blame] | 2882 | |
| 2883 | r5l_append_flush_payload(log, sh->sector); |
Song Liu | 5ddf044 | 2017-05-11 17:03:44 -0700 | [diff] [blame] | 2884 | 	/* stripe is flushed to raid disks, we can do resync now */ |
| 2885 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) |
| 2886 | set_bit(STRIPE_HANDLE, &sh->state); |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2887 | } |
| 2888 | |
Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 2889 | int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2890 | { |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2891 | struct r5conf *conf = sh->raid_conf; |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2892 | int pages = 0; |
| 2893 | int reserve; |
| 2894 | int i; |
| 2895 | int ret = 0; |
| 2896 | |
| 2897 | BUG_ON(!log); |
| 2898 | |
| 2899 | for (i = 0; i < sh->disks; i++) { |
| 2900 | void *addr; |
| 2901 | |
| 2902 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) |
| 2903 | continue; |
| 2904 | addr = kmap_atomic(sh->dev[i].page); |
| 2905 | sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, |
| 2906 | addr, PAGE_SIZE); |
| 2907 | kunmap_atomic(addr); |
| 2908 | pages++; |
| 2909 | } |
| 2910 | WARN_ON(pages == 0); |
| 2911 | |
| 2912 | /* |
| 2913 | 	 * The stripe must enter the state machine again to call endio, so |
| 2914 | * don't delay. |
| 2915 | */ |
| 2916 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 2917 | atomic_inc(&sh->count); |
| 2918 | |
| 2919 | mutex_lock(&log->io_mutex); |
| 2920 | /* meta + data */ |
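| | 	/* |
| | 	 * Space needed on the log: one meta block plus one 4K block per |
| | 	 * cached page, converted to sectors; with 4K pages this is |
| | 	 * (1 + pages) * 8 sectors. |
| | 	 */ |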
| 2921 | reserve = (1 + pages) << (PAGE_SHIFT - 9); |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2922 | |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2923 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && |
| 2924 | sh->log_start == MaxSector) |
| 2925 | r5l_add_no_space_stripe(log, sh); |
| 2926 | else if (!r5l_has_free_space(log, reserve)) { |
| 2927 | if (sh->log_start == log->last_checkpoint) |
| 2928 | BUG(); |
| 2929 | else |
| 2930 | r5l_add_no_space_stripe(log, sh); |
Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2931 | } else { |
| 2932 | ret = r5l_log_stripe(log, sh, pages, 0); |
| 2933 | if (ret) { |
| 2934 | spin_lock_irq(&log->io_list_lock); |
| 2935 | list_add_tail(&sh->log_list, &log->no_mem_stripes); |
| 2936 | spin_unlock_irq(&log->io_list_lock); |
| 2937 | } |
| 2938 | } |
| 2939 | |
| 2940 | mutex_unlock(&log->io_mutex); |
| 2941 | return 0; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2942 | } |
| 2943 | |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2944 | /* check whether this big stripe is in the write-back cache. */ |
| 2945 | bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect) |
| 2946 | { |
| 2947 | struct r5l_log *log = conf->log; |
| 2948 | sector_t tree_index; |
| 2949 | void *slot; |
| 2950 | |
| 2951 | if (!log) |
| 2952 | return false; |
| 2953 | |
| 2954 | WARN_ON_ONCE(!rcu_read_lock_held()); |
| 2955 | tree_index = r5c_tree_index(conf, sect); |
| 2956 | slot = radix_tree_lookup(&log->big_stripe_tree, tree_index); |
| 2957 | return slot != NULL; |
| 2958 | } |
| 2959 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2960 | static int r5l_load_log(struct r5l_log *log) |
| 2961 | { |
| 2962 | struct md_rdev *rdev = log->rdev; |
| 2963 | struct page *page; |
| 2964 | struct r5l_meta_block *mb; |
| 2965 | sector_t cp = log->rdev->journal_tail; |
| 2966 | u32 stored_crc, expected_crc; |
| 2967 | bool create_super = false; |
JackieLiu | d30dfeb | 2016-12-08 08:47:39 +0800 | [diff] [blame] | 2968 | int ret = 0; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2969 | |
| 2970 | /* Make sure it's valid */ |
| 2971 | if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) |
| 2972 | cp = 0; |
| 2973 | page = alloc_page(GFP_KERNEL); |
| 2974 | if (!page) |
| 2975 | return -ENOMEM; |
| 2976 | |
Mike Christie | 796a5cf | 2016-06-05 14:32:07 -0500 | [diff] [blame] | 2977 | if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2978 | ret = -EIO; |
| 2979 | goto ioerr; |
| 2980 | } |
| 2981 | mb = page_address(page); |
| 2982 | |
| 2983 | if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || |
| 2984 | mb->version != R5LOG_VERSION) { |
| 2985 | create_super = true; |
| 2986 | goto create; |
| 2987 | } |
| 2988 | stored_crc = le32_to_cpu(mb->checksum); |
| 2989 | mb->checksum = 0; |
Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 2990 | expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2991 | if (stored_crc != expected_crc) { |
| 2992 | create_super = true; |
| 2993 | goto create; |
| 2994 | } |
| 2995 | if (le64_to_cpu(mb->position) != cp) { |
| 2996 | create_super = true; |
| 2997 | goto create; |
| 2998 | } |
| 2999 | create: |
| 3000 | if (create_super) { |
| 3001 | log->last_cp_seq = prandom_u32(); |
| 3002 | cp = 0; |
Zhengyuan Liu | 56056c2 | 2016-10-24 16:15:59 +0800 | [diff] [blame] | 3003 | r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3004 | /* |
| 3005 | 		 * Make sure the super points to the correct address. The log |
| 3006 | 		 * might have data very soon. If the super doesn't have the |
| 3007 | 		 * correct log tail address, recovery can't find the log. |
| 3008 | */ |
| 3009 | r5l_write_super(log, cp); |
| 3010 | } else |
| 3011 | log->last_cp_seq = le64_to_cpu(mb->seq); |
| 3012 | |
| 3013 | log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3014 | log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; |
| 3015 | if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) |
| 3016 | log->max_free_space = RECLAIM_MAX_FREE_SPACE; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3017 | log->last_checkpoint = cp; |
| 3018 | |
| 3019 | __free_page(page); |
| 3020 | |
JackieLiu | d30dfeb | 2016-12-08 08:47:39 +0800 | [diff] [blame] | 3021 | if (create_super) { |
| 3022 | log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS); |
| 3023 | log->seq = log->last_cp_seq + 1; |
| 3024 | log->next_checkpoint = cp; |
| 3025 | } else |
| 3026 | ret = r5l_recovery_log(log); |
| 3027 | |
Zhengyuan Liu | 3d7e7e1 | 2016-12-04 16:49:44 +0800 | [diff] [blame] | 3028 | r5c_update_log_state(log); |
| 3029 | return ret; |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3030 | ioerr: |
| 3031 | __free_page(page); |
| 3032 | return ret; |
| 3033 | } |
| 3034 | |
Song Liu | d5d885f | 2017-11-19 22:17:01 -0800 | [diff] [blame] | 3035 | int r5l_start(struct r5l_log *log) |
| 3036 | { |
| 3037 | int ret; |
| 3038 | |
| 3039 | if (!log) |
| 3040 | return 0; |
| 3041 | |
| 3042 | ret = r5l_load_log(log); |
| 3043 | if (ret) { |
| 3044 | struct mddev *mddev = log->rdev->mddev; |
| 3045 | struct r5conf *conf = mddev->private; |
| 3046 | |
| 3047 | r5l_exit_log(conf); |
| 3048 | } |
| 3049 | return ret; |
| 3050 | } |
| 3051 | |
Song Liu | 70d466f | 2017-05-11 15:28:28 -0700 | [diff] [blame] | 3052 | void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3053 | { |
| 3054 | struct r5conf *conf = mddev->private; |
| 3055 | struct r5l_log *log = conf->log; |
| 3056 | |
| 3057 | if (!log) |
| 3058 | return; |
| 3059 | |
Song Liu | 70d466f | 2017-05-11 15:28:28 -0700 | [diff] [blame] | 3060 | if ((raid5_calc_degraded(conf) > 0 || |
| 3061 | test_bit(Journal, &rdev->flags)) && |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3062 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) |
| 3063 | schedule_work(&log->disable_writeback_work); |
| 3064 | } |
| 3065 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3066 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) |
| 3067 | { |
Jens Axboe | c888a8f | 2016-04-13 13:33:19 -0600 | [diff] [blame] | 3068 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3069 | struct r5l_log *log; |
Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3070 | char b[BDEVNAME_SIZE]; |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3071 | int ret; |
Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3072 | |
| 3073 | pr_debug("md/raid:%s: using device %s as journal\n", |
| 3074 | mdname(conf->mddev), bdevname(rdev->bdev, b)); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3075 | |
| 3076 | if (PAGE_SIZE != 4096) |
| 3077 | return -EINVAL; |
Song Liu | c757ec9 | 2016-11-17 15:24:36 -0800 | [diff] [blame] | 3078 | |
| 3079 | /* |
| 3080 | 	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and |
| 3081 | 	 * raid_disks r5l_payload_data_parity structures. |
| 3082 | 	 * |
| 3083 | 	 * The write journal and cache do not work for very big arrays |
| 3084 | 	 * (raid_disks > 203) |
| 3085 | */ |
| 3086 | if (sizeof(struct r5l_meta_block) + |
| 3087 | ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) * |
| 3088 | conf->raid_disks) > PAGE_SIZE) { |
| 3089 | pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n", |
| 3090 | mdname(conf->mddev), conf->raid_disks); |
| 3091 | return -EINVAL; |
| 3092 | } |
| 3093 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3094 | log = kzalloc(sizeof(*log), GFP_KERNEL); |
| 3095 | if (!log) |
| 3096 | return -ENOMEM; |
| 3097 | log->rdev = rdev; |
| 3098 | |
Jens Axboe | c888a8f | 2016-04-13 13:33:19 -0600 | [diff] [blame] | 3099 | log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0; |
Christoph Hellwig | 56fef7c | 2015-10-05 09:31:09 +0200 | [diff] [blame] | 3100 | |
Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 3101 | log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, |
| 3102 | sizeof(rdev->mddev->uuid)); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3103 | |
| 3104 | mutex_init(&log->io_mutex); |
| 3105 | |
| 3106 | spin_lock_init(&log->io_list_lock); |
| 3107 | INIT_LIST_HEAD(&log->running_ios); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3108 | INIT_LIST_HEAD(&log->io_end_ios); |
Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 3109 | INIT_LIST_HEAD(&log->flushing_ios); |
Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 3110 | INIT_LIST_HEAD(&log->finished_ios); |
Ming Lei | 3a83f46 | 2016-11-22 08:57:21 -0700 | [diff] [blame] | 3111 | bio_init(&log->flush_bio, NULL, 0); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3112 | |
| 3113 | log->io_kc = KMEM_CACHE(r5l_io_unit, 0); |
| 3114 | if (!log->io_kc) |
| 3115 | goto io_kc; |
| 3116 | |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3117 | ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc); |
| 3118 | if (ret) |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3119 | goto io_pool; |
| 3120 | |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3121 | ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS); |
| 3122 | if (ret) |
Christoph Hellwig | c38d29b | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3123 | goto io_bs; |
| 3124 | |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3125 | ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0); |
| 3126 | if (ret) |
Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3127 | goto out_mempool; |
| 3128 | |
Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 3129 | spin_lock_init(&log->tree_lock); |
| 3130 | INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN); |
| 3131 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3132 | log->reclaim_thread = md_register_thread(r5l_reclaim_thread, |
| 3133 | log->rdev->mddev, "reclaim"); |
| 3134 | if (!log->reclaim_thread) |
| 3135 | goto reclaim_thread; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 3136 | log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; |
| 3137 | |
Shaohua Li | 0fd22b4 | 2015-09-02 13:49:47 -0700 | [diff] [blame] | 3138 | init_waitqueue_head(&log->iounit_wait); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3139 | |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3140 | INIT_LIST_HEAD(&log->no_mem_stripes); |
| 3141 | |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3142 | INIT_LIST_HEAD(&log->no_space_stripes); |
| 3143 | spin_lock_init(&log->no_space_stripes_lock); |
| 3144 | |
Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 3145 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3146 | INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async); |
Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 3147 | |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 3148 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; |
Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 3149 | INIT_LIST_HEAD(&log->stripe_in_journal_list); |
| 3150 | spin_lock_init(&log->stripe_in_journal_lock); |
| 3151 | atomic_set(&log->stripe_in_journal_count, 0); |
Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 3152 | |
Song Liu | d2250f1 | 2016-12-14 15:38:02 -0800 | [diff] [blame] | 3153 | rcu_assign_pointer(conf->log, log); |
| 3154 | |
Shaohua Li | a62ab49 | 2016-01-06 14:37:13 -0800 | [diff] [blame] | 3155 | set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3156 | return 0; |
Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3157 | |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3158 | reclaim_thread: |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3159 | mempool_exit(&log->meta_pool); |
Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3160 | out_mempool: |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3161 | bioset_exit(&log->bs); |
Christoph Hellwig | c38d29b | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3162 | io_bs: |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3163 | mempool_exit(&log->io_pool); |
Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3164 | io_pool: |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3165 | kmem_cache_destroy(log->io_kc); |
| 3166 | io_kc: |
| 3167 | kfree(log); |
| 3168 | return -EINVAL; |
| 3169 | } |
| 3170 | |
Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3171 | void r5l_exit_log(struct r5conf *conf) |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3172 | { |
Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3173 | struct r5l_log *log = conf->log; |
| 3174 | |
| 3175 | conf->log = NULL; |
| 3176 | synchronize_rcu(); |
| 3177 | |
NeilBrown | 4d5324f | 2017-10-19 12:17:16 +1100 | [diff] [blame] | 3178 | /* Ensure disable_writeback_work wakes up and exits */ |
| 3179 | wake_up(&conf->mddev->sb_wait); |
Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3180 | flush_work(&log->disable_writeback_work); |
Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3181 | md_unregister_thread(&log->reclaim_thread); |
Kent Overstreet | afeee51 | 2018-05-20 18:25:52 -0400 | [diff] [blame] | 3182 | mempool_exit(&log->meta_pool); |
| 3183 | bioset_exit(&log->bs); |
| 3184 | mempool_exit(&log->io_pool); |
Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3185 | kmem_cache_destroy(log->io_kc); |
| 3186 | kfree(log); |
| 3187 | } |