/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

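/*
 * Every ioend takes a reference on its inode via i_iocount, dropped by
 * xfs_ioend_wake() on completion.  xfs_ioend_wait() lets a caller block
 * until all I/O completions against an inode have drained, using the
 * wait queue selected by hashing the inode address into the table above.
 */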
void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

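/*
 * Walk the buffer_heads attached to a page and note whether any of them
 * are delalloc, unmapped-but-uptodate, or unwritten.  The writepage and
 * releasepage paths use this to decide what conversion work the page
 * needs before it can be written out or freed.
 */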
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = XFS_I(inode);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq = xfsdatad_workqueue;
		if (ioend->io_work.func == xfs_end_bio_unwritten)
			wq = xfsconvertd_workqueue;

		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback that happens before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

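/*
 * Map <offset, count> of the file to at most one extent.  Note the
 * leading minus sign below: xfs_iomap() returns positive XFS error
 * codes, while the callers in this file expect the usual negative
 * errno convention.
 */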
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	int			nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

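/* Does the cached mapping cover the file offset we are looking at? */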
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

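/*
 * Attach a bio to its ioend and submit it.  io_remaining is bumped
 * before submission so the ioend cannot be completed and freed while
 * this bio is still in flight.
 */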
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

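/*
 * Allocate a bio sized for the worst case; if the allocation fails,
 * keep retrying with half as many vectors until one succeeds.
 */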
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

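/*
 * Transition a locked, mapped buffer into async-write state so the
 * generic end_buffer_async_write() completion machinery applies to it.
 */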
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

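/* Convenience wrapper: add one buffer_head's page fragment to a bio. */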
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The current ioend is handed back to the caller via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

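/*
 * Point bh at the on-disk location backing file offset "offset":
 * convert the extent's start (iomap_bn, in 512-byte basic blocks) to
 * units of the filesystem block size and add the offset of this
 * buffer within the extent.
 */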
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

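/*
 * Starting from a buffer within the current page, count how many
 * contiguous bytes are in the same state (mapped or not) and hence
 * could be clustered into one I/O, probing at most 64 pages beyond
 * the starting page to bound the cost.
 */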
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
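	/*
	 * A worked example with illustrative numbers (assuming 4k pages
	 * and 512 byte blocks, not values taken from this code): if EOF
	 * falls 300 bytes into this page, p_offset below becomes
	 * roundup(300, 512) = 512 and page_dirty = 1, i.e. only the
	 * first block counts as cleanable.  On a page wholly before EOF,
	 * p_offset is PAGE_CACHE_SIZE and page_dirty = 8.
	 */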
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			wbc->nr_to_write--;
			if (wbc->nr_to_write <= 0)
				done = 1;
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks, or which block for
 * that matter, are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction, or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * VM calculation for nr_to_write seems off.  Bump it way
	 * up, this gets simple streaming writes zippy again.
	 * To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

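/*
 * ->writepages: clear the XFS_ITRUNCATED flag on the inode and then
 * defer to generic_writepages(), which calls back into
 * xfs_vm_writepage() for each dirty page.
 */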
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

Nathan Scottf51623b2006-03-14 13:26:27 +11001319/*
1320 * Called to move a page into cleanable state - and from there
1321 * to be released. Possibly the page is already clean. We always
1322 * have buffer heads in this call.
1323 *
1324 * Returns 0 if the page is ok to release, 1 otherwise.
1325 *
1326 * Possible scenarios are:
1327 *
1328 * 1. We are being called to release a page which has been written
1329 * to via regular I/O. buffer heads will be dirty and possibly
1330 * delalloc. If no delalloc buffer heads in this case then we
1331 * can just return zero.
1332 *
1333 * 2. We are called to release a page which has been written via
1334 * mmap, all we need to do is ensure there is no delalloc
1335 * state in the buffer heads, if not we can let the caller
1336 * free them and we should come back later via writepage.
1337 */
STATIC int
xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
{
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

        if (!page_has_buffers(page))
                return 0;

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /*
         * If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (current_test_flags(PF_FSTRANS))
                return 0;

        /*
         * Convert delalloc space to real space but do not flush the
         * data out to disk; that will be done by the caller.
         * We never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}

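/*
 * Common implementation behind xfs_get_blocks() and
 * xfs_get_blocks_direct(): map the range starting at iblock (up to
 * bh_result->b_size bytes) through xfs_iomap() and translate the
 * resulting extent into buffer_head state (mapped, new, unwritten,
 * delay) for the generic block I/O paths.
 */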
STATIC int
__xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
{
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
        int                     niomap = 1;
        int                     error;

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;

        if (!create && direct && offset >= i_size_read(inode))
                return 0;

        error = xfs_iomap(XFS_I(inode), offset, size,
                          create ? flags : BMAPI_READ, &iomap, &niomap);
        if (error)
                return -error;
        if (niomap == 0)
                return 0;

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                /*
                 * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        xfs_map_buffer(bh_result, &iomap, offset,
                                       inode->i_blkbits);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                }
        }

        /*
         * If this is a realtime file, data may be on a different device
         * to that pointed to by the buffer_head's b_bdev currently.
         */
        bh_result->b_bdev = iomap.iomap_target->bt_bdev;

        /*
         * If we previously allocated a block out beyond eof and we are now
         * coming back to use it then we will need to flag it as new even if it
         * has a disk address.
         *
         * With sub-block writes into unwritten extents we also need to mark
         * the buffer as new so that the unwritten parts of the buffer get
         * correctly zeroed.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) ||
             (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
                set_buffer_new(bh_result);

        if (iomap.iomap_flags & IOMAP_DELAY) {
                BUG_ON(direct);
                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
                               iomap.iomap_bsize - iomap.iomap_delta, size);
                bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
        }

        return 0;
}

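/*
 * Buffered I/O variant, used by the readpage, write_begin and bmap
 * paths below.
 */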
int
xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 0, BMAPI_WRITE);
}

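/*
 * Direct I/O variant: BMAPI_DIRECT makes xfs_iomap() allocate real
 * (possibly unwritten) extents rather than delayed allocations,
 * which direct I/O cannot write into.
 */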
STATIC int
xfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
        struct kiocb    *iocb,
        loff_t          offset,
        ssize_t         size,
        void            *private)
{
        xfs_ioend_t     *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * Well, if only it were that simple.  Because synchronous direct I/O
         * requires extent conversion to occur *before* we return to userspace,
         * we have to wait for extent conversion to complete.  Look at the
         * iocb that has been passed to us to determine if this is AIO or
         * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
         * workqueue and wait for it to complete.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        ioend->io_offset = offset;
        ioend->io_size = size;
        if (ioend->io_type == IOMAP_READ) {
                xfs_finish_ioend(ioend, 0);
        } else if (private && size > 0) {
                xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
        } else {
                /*
                 * A direct I/O write ioend starts its life in unwritten
                 * state in case it maps an unwritten extent.  This write
                 * didn't map an unwritten extent, so switch its completion
                 * handler.
                 */
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);
                xfs_finish_ioend(ioend, 0);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
}

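/*
 * Direct I/O entry point.  An ioend is allocated up front and passed
 * to the generic direct I/O code through iocb->private; it is either
 * consumed by xfs_end_io_direct() on completion, or torn down at the
 * bottom of this function if the completion handler never ran.
 */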
STATIC ssize_t
xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
        loff_t                  offset,
        unsigned long           nr_segs)
{
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
        struct block_device *bdev;
        ssize_t         ret;

        bdev = xfs_find_bdev_for_inode(XFS_I(inode));

        if (rw == WRITE) {
                iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
                ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                        bdev, iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        } else {
                iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
                ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                        bdev, iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        }

        if (unlikely(ret != -EIOCBQUEUED && iocb->private))
                xfs_destroy_ioend(iocb->private);
        return ret;
}

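/*
 * Buffered write preparation is entirely generic; xfs_get_blocks()
 * supplies the block mapping.
 */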
STATIC int
xfs_vm_write_begin(
        struct file             *file,
        struct address_space    *mapping,
        loff_t                  pos,
        unsigned                len,
        unsigned                flags,
        struct page             **pagep,
        void                    **fsdata)
{
        *pagep = NULL;
        return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                 xfs_get_blocks);
}

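/*
 * bmap must report real disk blocks, so flush the file first to
 * convert any delayed allocations into real extents before handing
 * the lookup to generic_block_bmap().
 */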
STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);

        xfs_itrace_entry(ip);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
}

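/* Read side: the generic mpage helpers wired up to xfs_get_blocks(). */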
STATIC int
xfs_vm_readpage(
        struct file             *unused,
        struct page             *page)
{
        return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
        struct page             *page,
        unsigned long           offset)
{
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
        block_invalidatepage(page, offset);
}

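/*
 * The methods above, gathered into the address space operations
 * vector the VM uses for XFS pagecache mappings.
 */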
const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .write_begin            = xfs_vm_write_begin,
        .write_end              = generic_write_end,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};