[XFS] Use the right offset when ensuring a delayed allocate conversion has covered the offset originally requested. Getting this wrong can cause data corruption when multiple processes are performing writeout on different areas of the same file, though it is quite difficult to hit.
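
For context on the accounting side of the change: page_dirty used to be
initialised to PAGE_CACHE_SIZE divided by the buffer size, i.e. every
buffer on the page, and is now initialised to the number of buffers that
lie before EOF; the page's dirty state is only cleared once each of those
buffers has been moved into a cleanable state.  The userspace sketch below
illustrates that arithmetic only and is not part of the patch; the sizes
and the names (EX_PAGE_SIZE, EX_BLOCK_SIZE, buffers_before_eof) are
assumptions made for the example.

#include <stdio.h>

/* Assumed sizes for the illustration: 4k pages, 512 byte buffers. */
#define EX_PAGE_SIZE	4096ULL
#define EX_BLOCK_SIZE	512ULL

/* Round x up to a multiple of y (same idea as the kernel's roundup()). */
#define EX_ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

/*
 * Count the buffers of page 'index' that lie before EOF -- the quantity
 * page_dirty now starts from.  Illustration only, not kernel code.
 */
static unsigned long long buffers_before_eof(unsigned long long isize,
					     unsigned long long index)
{
	unsigned long long start = index * EX_PAGE_SIZE;
	unsigned long long end = start + EX_PAGE_SIZE;
	unsigned long long p_offset;

	if (end > isize)
		end = isize;		/* clamp the page to EOF */
	p_offset = (end > start) ? end - start : 0;

	return EX_ROUNDUP(p_offset, EX_BLOCK_SIZE) / EX_BLOCK_SIZE;
}

int main(void)
{
	/* Page 1 of a 3-page file: all 8 buffers sit before EOF. */
	printf("%llu\n", buffers_before_eof(3 * EX_PAGE_SIZE, 1));
	/* Partial last page: i_size = 2 pages + 0x300 bytes -> 2 buffers. */
	printf("%llu\n", buffers_before_eof(2 * EX_PAGE_SIZE + 0x300, 2));
	return 0;
}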
SGI Modid: xfs-linux:xfs-kern:22377a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Christoph Hellwig <hch@sgi.com>
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 1edd3b6..9278e9a 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -558,7 +558,8 @@
 	int			i;
 
 	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
+	if (bh_count)
+		set_page_writeback(page);
 	if (clear_dirty)
 		clear_page_dirty(page);
 	unlock_page(page);
@@ -578,9 +579,6 @@
 
 		if (probed_page && clear_dirty)
			wbc->nr_to_write--;	/* Wrote an "extra" page */
-	} else {
-		end_page_writeback(page);
-		wbc->pages_skipped++;	/* We didn't write this page */
 	}
 }
 
@@ -602,21 +600,26 @@
 {
 	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
 	xfs_iomap_t		*mp = iomapp, *tmp;
-	unsigned long		end, offset;
-	pgoff_t			end_index;
-	int			i = 0, index = 0;
+	unsigned long		offset, end_offset;
+	int			index = 0;
 	int			bbits = inode->i_blkbits;
+	int			len, page_dirty;
 
-	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	if (page->index < end_index) {
-		end = PAGE_CACHE_SIZE;
-	} else {
-		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
-	}
+	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+
+	/*
+	 * page_dirty is initially a count of buffers on the page before
+	 * EOF and is decremented as we move each into a cleanable state.
+	 */
+	len = 1 << inode->i_blkbits;
+	end_offset = max(end_offset, PAGE_CACHE_SIZE);
+	end_offset = roundup(end_offset, len);
+	page_dirty = end_offset / len;
+
+	offset = 0;
 	bh = head = page_buffers(page);
 	do {
-		offset = i << bbits;
-		if (offset >= end)
+		if (offset >= end_offset)
 			break;
 		if (!(PageUptodate(page) || buffer_uptodate(bh)))
 			continue;
@@ -625,6 +628,7 @@
 			if (startio) {
 				lock_buffer(bh);
 				bh_arr[index++] = bh;
+				page_dirty--;
 			}
 			continue;
 		}
@@ -657,10 +661,11 @@
 			unlock_buffer(bh);
 			mark_buffer_dirty(bh);
 		}
-	} while (i++, (bh = bh->b_this_page) != head);
+		page_dirty--;
+	} while (offset += len, (bh = bh->b_this_page) != head);
 
-	if (startio) {
-		xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
+	if (startio && index) {
+		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
 	} else {
 		unlock_page(page);
 	}
@@ -743,19 +748,22 @@
 		}
 	}
 
-	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	end_offset = min_t(unsigned long long,
-			offset + PAGE_CACHE_SIZE, i_size_read(inode));
-
-	bh = head = page_buffers(page);
-	iomp = NULL;
+			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 
 	/*
-	 * page_dirty is initially a count of buffers on the page and
-	 * is decrememted as we move each into a cleanable state.
+	 * page_dirty is initially a count of buffers on the page before
+	 * EOF and is decremented as we move each into a cleanable state.
 	 */
-	len = bh->b_size;
-	page_dirty = PAGE_CACHE_SIZE / len;
+	len = 1 << inode->i_blkbits;
+	p_offset = max(p_offset, PAGE_CACHE_SIZE);
+	p_offset = roundup(p_offset, len);
+	page_dirty = p_offset / len;
+
+	iomp = NULL;
+	p_offset = 0;
+	bh = head = page_buffers(page);
 
 	do {
 		if (offset >= end_offset)
@@ -877,8 +885,10 @@
 	if (uptodate && bh == head)
 		SetPageUptodate(page);
 
-	if (startio)
-		xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);
+	if (startio) {
+		WARN_ON(page_dirty);
+		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
+	}
 
 	if (iomp) {
 		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>