Thomas Gleixner | 328970d | 2019-05-24 12:04:05 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Mark Fasheh | ccd979b | 2005-12-15 14:31:24 -0800 | [diff] [blame] | 2 | /* -*- mode: c; c-basic-offset: 8; -*- |
| 3 | * vim: noexpandtab sw=8 ts=8 sts=0: |
| 4 | * |
| 5 | * io.c |
| 6 | * |
| 7 | * Buffer cache handling |
| 8 | * |
| 9 | * Copyright (C) 2002, 2004 Oracle. All rights reserved. |
Mark Fasheh | ccd979b | 2005-12-15 14:31:24 -0800 | [diff] [blame] | 10 | */ |
| 11 | |
| 12 | #include <linux/fs.h> |
| 13 | #include <linux/types.h> |
Mark Fasheh | ccd979b | 2005-12-15 14:31:24 -0800 | [diff] [blame] | 14 | #include <linux/highmem.h> |
Christoph Hellwig | 2f8b544 | 2016-11-01 07:40:13 -0600 | [diff] [blame] | 15 | #include <linux/bio.h> |
Mark Fasheh | ccd979b | 2005-12-15 14:31:24 -0800 | [diff] [blame] | 16 | |
| 17 | #include <cluster/masklog.h> |
| 18 | |
| 19 | #include "ocfs2.h" |
| 20 | |
| 21 | #include "alloc.h" |
| 22 | #include "inode.h" |
| 23 | #include "journal.h" |
| 24 | #include "uptodate.h" |
Mark Fasheh | ccd979b | 2005-12-15 14:31:24 -0800 | [diff] [blame] | 25 | #include "buffer_head_io.h" |
Tao Ma | 15057e9 | 2011-02-24 16:09:38 +0800 | [diff] [blame] | 26 | #include "ocfs2_trace.h" |
Mark Fasheh | ccd979b | 2005-12-15 14:31:24 -0800 | [diff] [blame] | 27 | |
/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	/* Set before a read when the caller supplied a validate() hook;
	 * tells the completion path in ocfs2_read_blocks() that the
	 * buffer contents still need to be validated. */
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions: this generates the
 * set_buffer_needs_validate() / clear_buffer_needs_validate() /
 * buffer_needs_validate() accessors used below. */
BUFFER_FNS(NeedsValidate, needs_validate);
| 39 | |
/*
 * ocfs2_write_block() - synchronously write one buffer to disk,
 * bypassing the journal.
 *
 * @osb: the mounted filesystem's private super block data
 * @bh:  buffer to write; must not be managed by the journal
 * @ci:  metadata cache the buffer belongs to; its io lock is held
 *       around the write and the buffer is marked uptodate in it
 *       on success
 *
 * Returns 0 on success, -EROFS on a hard-readonly filesystem, or
 * -EIO if the device write failed.
 */
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	/* Never allowed to write below the super block, and never a
	 * buffer the journal owns - those go through journalled writes. */
	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, 0, bh);

	/* Synchronous: block until end_buffer_write_sync() unlocks us. */
	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}
| 87 | |
/* Caller must provide a bhs[] with all NULL or all non-NULL entries, so it
 * will be easier to handle read failure.
 *
 * Synchronously read @nr blocks starting at @block, without consulting
 * the ocfs2 uptodate cache.  Buffers already owned by the journal or
 * dirty in memory are skipped rather than re-read.  Returns 0 on
 * success, -ENOMEM if a buffer head could not be allocated, or -EIO if
 * any submitted read failed.  On error, buffer heads this function
 * allocated itself are released and their bhs[] slots reset to NULL;
 * caller-provided buffer heads are left for the caller to put.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;
	int new_bh = 0;	/* nonzero if we allocated the buffer heads here */

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	/* Don't put buffer head and re-assign it to NULL if it is allocated
	 * outside since the caller can't be aware of this alteration!
	 */
	new_bh = (bhs[0] == NULL);

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				/* Fall through to the cleanup loop below
				 * so reads already in flight are waited
				 * on and released. */
				break;
			}
		}
		bh = bhs[i];

		/* The journal owns this buffer - don't read it from disk
		 * behind jbd2's back. */
		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		/* Re-check under the buffer lock - the journal may have
		 * claimed this buffer while we were acquiring it. */
		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
#else
			unlock_buffer(bh);
			continue;
#endif
		}

		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
	}

	/* Walk the array backwards waiting for completions; on any error
	 * the loop restarts here with status set so the remaining slots
	 * are cleaned up rather than checked. */
read_failure:
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (unlikely(status)) {
			if (new_bh && bh) {
				/* If middle bh fails, let previous bh
				 * finish its read and then put it to
				 * avoid bh leak
				 */
				if (!buffer_jbd(bh))
					wait_on_buffer(bh);
				put_bh(bh);
				bhs[i - 1] = NULL;
			} else if (bh && buffer_uptodate(bh)) {
				clear_buffer_uptodate(bh);
			}
			continue;
		}

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			goto read_failure;
		}
	}

bail:
	return status;
}
| 191 | |
/* Caller must provide a bhs[] with all NULL or all non-NULL entries, so it
 * will be easier to handle read failure.
 *
 * Read @nr blocks starting at @block through the metadata cache @ci.
 * Buffers already uptodate in the cache are not re-read unless
 * OCFS2_BH_IGNORE_CACHE is set in @flags.  With OCFS2_BH_READAHEAD
 * (mutually exclusive with IGNORE_CACHE) reads are submitted without
 * waiting for completion.  The optional @validate hook is called once
 * per freshly-read buffer; a nonzero return from it fails the whole
 * call.  Returns 0 on success or a negative errno; on error, buffer
 * heads allocated here are released and their bhs[] slots reset to
 * NULL, while caller-provided buffer heads are left to the caller.
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	int new_bh = 0;	/* nonzero if we allocated the buffer heads here */

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	/* READAHEAD and IGNORE_CACHE are mutually exclusive by contract. */
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	/* Don't put buffer head and re-assign it to NULL if it is allocated
	 * outside since the caller can't be aware of this alteration!
	 */
	new_bh = (bhs[0] == NULL);

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				/* Don't forget to put previous bh! */
				break;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		/* The journal owns this buffer - take its contents as-is. */
		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request than we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			/* Re-check under the buffer lock - the journal may
			 * have claimed this buffer in the meantime. */
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ, 0, bh);
			continue;
		}
	}

	/* Completion pass: wait on reads, validate, and mark buffers in
	 * the cache.  On any error the loop restarts here with status
	 * set so remaining slots are cleaned up rather than checked. */
read_failure:
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (unlikely(status)) {
				/* Clear the buffers on error including those
				 * ever succeeded in reading
				 */
				if (new_bh && bh) {
					/* If middle bh fails, let previous bh
					 * finish its read and then put it to
					 * avoid bh leak
					 */
					if (!buffer_jbd(bh))
						wait_on_buffer(bh);
					put_bh(bh);
					bhs[i] = NULL;
				} else if (bh && buffer_uptodate(bh)) {
					clear_buffer_uptodate(bh);
				}
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				clear_buffer_needs_validate(bh);
				goto read_failure;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status)
					goto read_failure;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}
Tao Ma | d659072 | 2007-12-18 15:47:03 +0800 | [diff] [blame] | 405 | |
| 406 | /* Check whether the blkno is the super block or one of the backups. */ |
| 407 | static void ocfs2_check_super_or_backup(struct super_block *sb, |
| 408 | sector_t blkno) |
| 409 | { |
| 410 | int i; |
| 411 | u64 backup_blkno; |
| 412 | |
| 413 | if (blkno == OCFS2_SUPER_BLOCK_BLKNO) |
| 414 | return; |
| 415 | |
| 416 | for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) { |
| 417 | backup_blkno = ocfs2_backup_super_blkno(sb, i); |
| 418 | if (backup_blkno == blkno) |
| 419 | return; |
| 420 | } |
| 421 | |
| 422 | BUG(); |
| 423 | } |
| 424 | |
/*
 * Write super block and backups doesn't need to collaborate with journal,
 * so we don't need to lock ip_io_mutex and ci doesn't need to be passed
 * into this function.
 *
 * @osb: the mounted filesystem's private super block data
 * @bh:  buffer holding the super block (or a backup); its block number
 *       is sanity-checked against the valid super block locations
 *
 * Computes the metadata ECC over the buffer before submission and
 * waits for the write to finish.  Returns 0 on success, -EROFS on a
 * read-only (hard or soft) filesystem, or -EIO on write failure.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	/* BUGs if bh is not a super block or backup location. */
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	/* Refresh the block's ECC before it hits the disk. */
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(REQ_OP_WRITE, 0, bh);

	/* Synchronous: block until end_buffer_write_sync() unlocks us. */
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}