Btrfs: deal with ENOMEM in the rewind path

We can get ENOMEM when trying to allocate dummy extent buffers for the rewind
operation of the tree mod log.  Instead of BUG_ON()'ing in this case, pass the
ENOMEM up to the callers.  I looked back through the callers and I'm pretty
sure I caught everybody who did BUG_ON(ret) in this path.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Josef Bacik 2013-08-07 14:54:37 -04:00, committed by Chris Mason
Parent: ebdad913aa
Commit: db7f3436c1
2 changed files, 112 additions and 97 deletions
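
The change follows a common kernel pattern: an allocation helper that used to
BUG_ON() on failure now returns NULL, and each caller turns that NULL into
-ENOMEM and unwinds normally. A minimal sketch of the pattern, using
hypothetical names (widget_rewind() and widget_lookup() are illustrative, not
btrfs code):

#include <linux/errno.h>
#include <linux/slab.h>

struct widget {
	int payload;
};

static struct widget *widget_rewind(gfp_t mask)
{
	/* Before: BUG_ON() on failure; now the failure is reported, not fatal. */
	return kzalloc(sizeof(struct widget), mask);
}

static int widget_lookup(struct widget **out)
{
	struct widget *w = widget_rewind(GFP_ATOMIC);

	if (!w)
		return -ENOMEM;	/* pass the failure up the call chain */
	*out = w;
	return 0;
}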

fs/btrfs/ctree.c

@@ -1211,7 +1211,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 		BUG_ON(tm->slot != 0);
 		eb_rewin = alloc_dummy_extent_buffer(eb->start,
 						fs_info->tree_root->nodesize);
-		BUG_ON(!eb_rewin);
+		if (!eb_rewin) {
+			btrfs_tree_read_unlock(eb);
+			free_extent_buffer(eb);
+			return NULL;
+		}
 		btrfs_set_header_bytenr(eb_rewin, eb->start);
 		btrfs_set_header_backref_rev(eb_rewin,
 					     btrfs_header_backref_rev(eb));
@@ -1219,7 +1223,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
 	} else {
 		eb_rewin = btrfs_clone_extent_buffer(eb);
-		BUG_ON(!eb_rewin);
+		if (!eb_rewin) {
+			btrfs_tree_read_unlock(eb);
+			free_extent_buffer(eb);
+			return NULL;
+		}
 	}
 
 	btrfs_tree_read_unlock(eb);
@@ -2772,6 +2780,10 @@ again:
 							  BTRFS_READ_LOCK);
 			}
 			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
+			if (!b) {
+				ret = -ENOMEM;
+				goto done;
+			}
 			p->locks[level] = BTRFS_READ_LOCK;
 			p->nodes[level] = b;
 		} else {
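
With the hunk above, a NULL return from tree_mod_log_rewind() becomes
ret = -ENOMEM and unwinds through btrfs_search_old_slot()'s normal "done"
exit, so callers get an ordinary error code instead of a crash. A hedged
sketch of such a caller (lookup_old_item() is hypothetical;
btrfs_search_old_slot() and btrfs_release_path() are the real interfaces):

static int lookup_old_item(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path, u64 time_seq)
{
	int ret;

	ret = btrfs_search_old_slot(root, key, path, time_seq);
	if (ret < 0)
		return ret;	/* may now be -ENOMEM from the rewind */

	/* ... use path->nodes[0] / path->slots[0] here ... */
	btrfs_release_path(path);
	return 0;
}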

fs/btrfs/extent_io.c

@@ -4222,101 +4222,6 @@ static void __free_extent_buffer(struct extent_buffer *eb)
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
-						   u64 start,
-						   unsigned long len,
-						   gfp_t mask)
-{
-	struct extent_buffer *eb = NULL;
-
-	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
-	if (eb == NULL)
-		return NULL;
-	eb->start = start;
-	eb->len = len;
-	eb->tree = tree;
-	eb->bflags = 0;
-	rwlock_init(&eb->lock);
-	atomic_set(&eb->write_locks, 0);
-	atomic_set(&eb->read_locks, 0);
-	atomic_set(&eb->blocking_readers, 0);
-	atomic_set(&eb->blocking_writers, 0);
-	atomic_set(&eb->spinning_readers, 0);
-	atomic_set(&eb->spinning_writers, 0);
-	eb->lock_nested = 0;
-	init_waitqueue_head(&eb->write_lock_wq);
-	init_waitqueue_head(&eb->read_lock_wq);
-
-	btrfs_leak_debug_add(&eb->leak_list, &buffers);
-
-	spin_lock_init(&eb->refs_lock);
-	atomic_set(&eb->refs, 1);
-	atomic_set(&eb->io_pages, 0);
-
-	/*
-	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
-	 */
-	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
-		     > MAX_INLINE_EXTENT_BUFFER_SIZE);
-	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
-
-	return eb;
-}
-
-struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
-{
-	unsigned long i;
-	struct page *p;
-	struct extent_buffer *new;
-	unsigned long num_pages = num_extent_pages(src->start, src->len);
-
-	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
-	if (new == NULL)
-		return NULL;
-
-	for (i = 0; i < num_pages; i++) {
-		p = alloc_page(GFP_ATOMIC);
-		BUG_ON(!p);
-		attach_extent_buffer_page(new, p);
-		WARN_ON(PageDirty(p));
-		SetPageUptodate(p);
-		new->pages[i] = p;
-	}
-
-	copy_extent_buffer(new, src, 0, 0, src->len);
-	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
-	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
-
-	return new;
-}
-
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
-{
-	struct extent_buffer *eb;
-	unsigned long num_pages = num_extent_pages(0, len);
-	unsigned long i;
-
-	eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
-	if (!eb)
-		return NULL;
-
-	for (i = 0; i < num_pages; i++) {
-		eb->pages[i] = alloc_page(GFP_ATOMIC);
-		if (!eb->pages[i])
-			goto err;
-	}
-	set_extent_buffer_uptodate(eb);
-	btrfs_set_header_nritems(eb, 0);
-	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
-
-	return eb;
-err:
-	for (; i > 0; i--)
-		__free_page(eb->pages[i - 1]);
-	__free_extent_buffer(eb);
-	return NULL;
-}
-
 static int extent_buffer_under_io(struct extent_buffer *eb)
 {
 	return (atomic_read(&eb->io_pages) ||
@@ -4387,6 +4292,104 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 	__free_extent_buffer(eb);
 }
 
+static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
+						   u64 start,
+						   unsigned long len,
+						   gfp_t mask)
+{
+	struct extent_buffer *eb = NULL;
+
+	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+	if (eb == NULL)
+		return NULL;
+	eb->start = start;
+	eb->len = len;
+	eb->tree = tree;
+	eb->bflags = 0;
+	rwlock_init(&eb->lock);
+	atomic_set(&eb->write_locks, 0);
+	atomic_set(&eb->read_locks, 0);
+	atomic_set(&eb->blocking_readers, 0);
+	atomic_set(&eb->blocking_writers, 0);
+	atomic_set(&eb->spinning_readers, 0);
+	atomic_set(&eb->spinning_writers, 0);
+	eb->lock_nested = 0;
+	init_waitqueue_head(&eb->write_lock_wq);
+	init_waitqueue_head(&eb->read_lock_wq);
+
+	btrfs_leak_debug_add(&eb->leak_list, &buffers);
+
+	spin_lock_init(&eb->refs_lock);
+	atomic_set(&eb->refs, 1);
+	atomic_set(&eb->io_pages, 0);
+
+	/*
+	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+	 */
+	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+		     > MAX_INLINE_EXTENT_BUFFER_SIZE);
+	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
+
+	return eb;
+}
+
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+{
+	unsigned long i;
+	struct page *p;
+	struct extent_buffer *new;
+	unsigned long num_pages = num_extent_pages(src->start, src->len);
+
+	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+	if (new == NULL)
+		return NULL;
+
+	for (i = 0; i < num_pages; i++) {
+		p = alloc_page(GFP_ATOMIC);
+		if (!p) {
+			btrfs_release_extent_buffer(new);
+			return NULL;
+		}
+		attach_extent_buffer_page(new, p);
+		WARN_ON(PageDirty(p));
+		SetPageUptodate(p);
+		new->pages[i] = p;
+	}
+
+	copy_extent_buffer(new, src, 0, 0, src->len);
+	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
+	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+
+	return new;
+}
+
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+{
+	struct extent_buffer *eb;
+	unsigned long num_pages = num_extent_pages(0, len);
+	unsigned long i;
+
+	eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+	if (!eb)
+		return NULL;
+
+	for (i = 0; i < num_pages; i++) {
+		eb->pages[i] = alloc_page(GFP_ATOMIC);
+		if (!eb->pages[i])
+			goto err;
+	}
+	set_extent_buffer_uptodate(eb);
+	btrfs_set_header_nritems(eb, 0);
+	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+	return eb;
+err:
+	for (; i > 0; i--)
+		__free_page(eb->pages[i - 1]);
+	__free_extent_buffer(eb);
+	return NULL;
+}
+
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
 	int refs;
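
The two extent_io.c hunks are a code move plus one functional change:
__alloc_extent_buffer(), btrfs_clone_extent_buffer() and
alloc_dummy_extent_buffer() move below btrfs_release_extent_buffer() so that
the clone's error path can call it on a partially built buffer, replacing the
old BUG_ON(!p). A minimal sketch of that cleanup pattern, with hypothetical
names (pagevec_obj, obj_release(), obj_clone() are illustrative, not btrfs
code):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

#define OBJ_NPAGES 16

struct pagevec_obj {
	struct page *pages[OBJ_NPAGES];
};

static void obj_release(struct pagevec_obj *obj)
{
	int i;

	/* Free only the pages that were actually allocated. */
	for (i = 0; i < OBJ_NPAGES; i++)
		if (obj->pages[i])
			__free_page(obj->pages[i]);
	kfree(obj);
}

static struct pagevec_obj *obj_clone(gfp_t mask)
{
	struct pagevec_obj *obj = kzalloc(sizeof(*obj), mask);
	int i;

	if (!obj)
		return NULL;

	for (i = 0; i < OBJ_NPAGES; i++) {
		obj->pages[i] = alloc_page(mask);
		if (!obj->pages[i]) {
			/*
			 * Was a BUG_ON(): hand the half-built object to the
			 * shared release helper instead.
			 */
			obj_release(obj);
			return NULL;
		}
	}
	return obj;
}

Note that obj_release() has to be visible before obj_clone() can call it; the
same ordering constraint is what forces the three btrfs functions below
btrfs_release_extent_buffer() in this patch.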