/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	akpm@zip.com.au
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	/* bi_size only reaches zero once the final fragment has completed */
	if (bio->bi_size)
		return 1;

	/* Walk the bio_vec array backwards, finishing one page per entry */
	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}

static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate)
			SetPageError(page);
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}

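/*
 * Submit the accumulated BIO and return NULL, so that callers can write
 * "bio = mpage_bio_submit(rw, bio)" to both send off the I/O and reset
 * their local bio pointer in one step.
 */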
struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io_read;
	if (rw == WRITE)
		bio->bi_end_io = mpage_end_io_write;
	submit_bio(rw, bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		unsigned int __nocast gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		/*
		 * Memory is tight: rather than fail outright, keep halving
		 * the number of vecs until a smaller bio can be allocated.
		 */
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/**
 * mpage_readpages - populate an address space with some pages, and
 *                       start reads against them.
 *
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
			sector_t *last_block_in_bio, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	struct buffer_head bh;
	int length;
	int fully_mapped = 1;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;

	bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page;
				page_block++, block_in_file++) {
		bh.b_state = 0;
		if (block_in_file < last_block) {
			if (get_block(inode, block_in_file, &bh, 0))
				goto confused;
		}

		if (!buffer_mapped(&bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(&bh)) {
			map_buffer_to_page(page, &bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
			goto confused;
		blocks[page_block] = bh.b_blocknr;
		bdev = bh.b_bdev;
	}

	if (first_hole != blocks_per_page) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + (first_hole << blkbits), 0,
				PAGE_CACHE_SIZE - (first_hole << blkbits));
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		/* The bio is full: submit it and build a new one for this page */
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct pagevec lru_pvec;

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, get_block);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
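
/*
 * Example (illustrative sketch, not part of the original file): a filesystem
 * normally wires mpage_readpages() into its address_space_operations via a
 * thin wrapper that supplies its own block mapper.  "example_readpages" and
 * "example_get_block" are made-up names standing in for the filesystem's own
 * routines; ext2 does the equivalent with ext2_get_block.
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					example_get_block);
 *	}
 */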

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	bio = do_mpage_readpage(bio, page, 1,
			&last_block_in_bio, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
static struct bio *
__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
	sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc,
	writepage_t writepage_fn)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		if (get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
		char *kaddr;

		if (page->index > end_index || !offset)
			goto confused;
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && *last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * we cannot drop the bh if the page is not uptodate
		 * or a concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		*last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (writepage_fn) {
		*ret = (*writepage_fn)(page, wbc);
	} else {
		*ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	if (*ret) {
		if (*ret == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
out:
	return bio;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given
 * address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	return __mpage_writepages(mapping, wbc, get_block,
			mapping->a_ops->writepage);
}

int
__mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block,
		writepage_t writepage_fn)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	int ret = 0;
	int done = 0;
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end = -1;		/* Inclusive */
	int scanned = 0;
	int is_range = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	writepage = NULL;
	if (get_block == NULL)
		writepage = mapping->a_ops->writepage;

	pagevec_init(&pvec, 0);
	if (wbc->sync_mode == WB_SYNC_NONE) {
		index = mapping->writeback_index; /* Start from prev offset */
	} else {
		index = 0;			  /* whole-file sweep */
		scanned = 1;
	}
	if (wbc->start || wbc->end) {
		index = wbc->start >> PAGE_CACHE_SHIFT;
		end = wbc->end >> PAGE_CACHE_SHIFT;
		is_range = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (unlikely(is_range) && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			if (writepage) {
				ret = (*writepage)(page, wbc);
				if (ret) {
					if (ret == -ENOSPC)
						set_bit(AS_ENOSPC,
							&mapping->flags);
					else
						set_bit(AS_EIO,
							&mapping->flags);
				}
			} else {
				bio = __mpage_writepage(bio, page, get_block,
						&last_block_in_bio, &ret, wbc,
						writepage_fn);
			}
			if (unlikely(ret == WRITEPAGE_ACTIVATE))
				unlock_page(page);
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (!is_range)
		mapping->writeback_index = index;
	if (bio)
		mpage_bio_submit(WRITE, bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
EXPORT_SYMBOL(__mpage_writepages);

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	int ret = 0;
	struct bio *bio;
	sector_t last_block_in_bio = 0;

	bio = __mpage_writepage(NULL, page, get_block,
			&last_block_in_bio, &ret, wbc, NULL);
	if (bio)
		mpage_bio_submit(WRITE, bio);

	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
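
/*
 * Example (illustrative sketch, not part of the original file): the write-side
 * helpers are wired into address_space_operations the same way as the read
 * side.  "example_get_block", "example_writepage" and "example_writepages"
 * are made-up names standing in for the filesystem's own routines; ext2's
 * ->writepages is the equivalent one-liner around mpage_writepages().
 *
 *	static int example_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, example_get_block, wbc);
 *	}
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */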