Btrfs: deal with ENOMEM in the rewind path
We can get ENOMEM when trying to allocate the dummy extent buffers for the tree
mod log rewind operation. Instead of BUG_ON()'ing in that case, pass -ENOMEM
back up to the callers. I looked back through the callers and I'm pretty sure I
caught everybody who did BUG_ON(ret) in this path. Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
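---
Note: the diff below only covers the allocation side in fs/btrfs/extent_io.c.
As a rough sketch of the caller-side pattern this change enables (the function
name below is made up for illustration; the real callers are the tree mod log
rewind helpers and are not shown in this hunk), a caller can now turn the NULL
return into -ENOMEM instead of BUG_ON()'ing:

	/*
	 * Hypothetical caller sketch: propagate -ENOMEM instead of crashing
	 * when the dummy buffer for the rewind cannot be allocated.
	 */
	static int rewind_example(struct extent_buffer *eb,
				  struct extent_buffer **eb_ret)
	{
		struct extent_buffer *eb_rewin;

		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin)
			return -ENOMEM;	/* was BUG_ON(!eb_rewin) */

		*eb_ret = eb_rewin;
		return 0;
	}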
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c137f98..ef25c7d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4222,101 +4222,6 @@
kmem_cache_free(extent_buffer_cache, eb);
 }
 
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
- u64 start,
- unsigned long len,
- gfp_t mask)
-{
- struct extent_buffer *eb = NULL;
-
- eb = kmem_cache_zalloc(extent_buffer_cache, mask);
- if (eb == NULL)
- return NULL;
- eb->start = start;
- eb->len = len;
- eb->tree = tree;
- eb->bflags = 0;
- rwlock_init(&eb->lock);
- atomic_set(&eb->write_locks, 0);
- atomic_set(&eb->read_locks, 0);
- atomic_set(&eb->blocking_readers, 0);
- atomic_set(&eb->blocking_writers, 0);
- atomic_set(&eb->spinning_readers, 0);
- atomic_set(&eb->spinning_writers, 0);
- eb->lock_nested = 0;
- init_waitqueue_head(&eb->write_lock_wq);
- init_waitqueue_head(&eb->read_lock_wq);
-
- btrfs_leak_debug_add(&eb->leak_list, &buffers);
-
- spin_lock_init(&eb->refs_lock);
- atomic_set(&eb->refs, 1);
- atomic_set(&eb->io_pages, 0);
-
- /*
- * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
- */
- BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
- > MAX_INLINE_EXTENT_BUFFER_SIZE);
- BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
-
- return eb;
-}
-
-struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
-{
- unsigned long i;
- struct page *p;
- struct extent_buffer *new;
- unsigned long num_pages = num_extent_pages(src->start, src->len);
-
- new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
- if (new == NULL)
- return NULL;
-
- for (i = 0; i < num_pages; i++) {
- p = alloc_page(GFP_ATOMIC);
- BUG_ON(!p);
- attach_extent_buffer_page(new, p);
- WARN_ON(PageDirty(p));
- SetPageUptodate(p);
- new->pages[i] = p;
- }
-
- copy_extent_buffer(new, src, 0, 0, src->len);
- set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
- set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
-
- return new;
-}
-
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
-{
- struct extent_buffer *eb;
- unsigned long num_pages = num_extent_pages(0, len);
- unsigned long i;
-
- eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
- if (!eb)
- return NULL;
-
- for (i = 0; i < num_pages; i++) {
- eb->pages[i] = alloc_page(GFP_ATOMIC);
- if (!eb->pages[i])
- goto err;
- }
- set_extent_buffer_uptodate(eb);
- btrfs_set_header_nritems(eb, 0);
- set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
-
- return eb;
-err:
- for (; i > 0; i--)
- __free_page(eb->pages[i - 1]);
- __free_extent_buffer(eb);
- return NULL;
-}
-
static int extent_buffer_under_io(struct extent_buffer *eb)
{
return (atomic_read(&eb->io_pages) ||
@@ -4387,6 +4292,104 @@
__free_extent_buffer(eb);
 }
 
+static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
+ u64 start,
+ unsigned long len,
+ gfp_t mask)
+{
+ struct extent_buffer *eb = NULL;
+
+ eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+ if (eb == NULL)
+ return NULL;
+ eb->start = start;
+ eb->len = len;
+ eb->tree = tree;
+ eb->bflags = 0;
+ rwlock_init(&eb->lock);
+ atomic_set(&eb->write_locks, 0);
+ atomic_set(&eb->read_locks, 0);
+ atomic_set(&eb->blocking_readers, 0);
+ atomic_set(&eb->blocking_writers, 0);
+ atomic_set(&eb->spinning_readers, 0);
+ atomic_set(&eb->spinning_writers, 0);
+ eb->lock_nested = 0;
+ init_waitqueue_head(&eb->write_lock_wq);
+ init_waitqueue_head(&eb->read_lock_wq);
+
+ btrfs_leak_debug_add(&eb->leak_list, &buffers);
+
+ spin_lock_init(&eb->refs_lock);
+ atomic_set(&eb->refs, 1);
+ atomic_set(&eb->io_pages, 0);
+
+ /*
+ * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+ */
+ BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+ > MAX_INLINE_EXTENT_BUFFER_SIZE);
+ BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
+
+ return eb;
+}
+
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+{
+ unsigned long i;
+ struct page *p;
+ struct extent_buffer *new;
+ unsigned long num_pages = num_extent_pages(src->start, src->len);
+
+ new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < num_pages; i++) {
+ p = alloc_page(GFP_ATOMIC);
+ if (!p) {
+ btrfs_release_extent_buffer(new);
+ return NULL;
+ }
+ attach_extent_buffer_page(new, p);
+ WARN_ON(PageDirty(p));
+ SetPageUptodate(p);
+ new->pages[i] = p;
+ }
+
+ copy_extent_buffer(new, src, 0, 0, src->len);
+ set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
+ set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+
+ return new;
+}
+
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+{
+ struct extent_buffer *eb;
+ unsigned long num_pages = num_extent_pages(0, len);
+ unsigned long i;
+
+ eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+ if (!eb)
+ return NULL;
+
+ for (i = 0; i < num_pages; i++) {
+ eb->pages[i] = alloc_page(GFP_ATOMIC);
+ if (!eb->pages[i])
+ goto err;
+ }
+ set_extent_buffer_uptodate(eb);
+ btrfs_set_header_nritems(eb, 0);
+ set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+ return eb;
+err:
+ for (; i > 0; i--)
+ __free_page(eb->pages[i - 1]);
+ __free_extent_buffer(eb);
+ return NULL;
+}
+
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;