btrfs: subpage: make btrfs_alloc_subpage() return btrfs_subpage directly
The existing calling convention of btrfs_alloc_subpage() is pretty awful. Change it to a more common pattern by returning struct btrfs_subpage directly and letting the caller determine whether the call succeeded. Reviewed-by: Nikolay Borisov <nborisov@suse.com> Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Parent
fdf250db89
Commit
651fb41927
|
@ -6138,9 +6138,9 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
|
|||
* support, so we still preallocate the memory in the loop.
|
||||
*/
|
||||
if (fs_info->sectorsize < PAGE_SIZE) {
|
||||
ret = btrfs_alloc_subpage(fs_info, &prealloc,
|
||||
BTRFS_SUBPAGE_METADATA);
|
||||
if (ret < 0) {
|
||||
prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
|
||||
if (IS_ERR(prealloc)) {
|
||||
ret = PTR_ERR(prealloc);
|
||||
unlock_page(p);
|
||||
put_page(p);
|
||||
exists = ERR_PTR(ret);
|
||||
|
|
|
@ -66,8 +66,7 @@
|
|||
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
|
||||
struct page *page, enum btrfs_subpage_type type)
|
||||
{
|
||||
struct btrfs_subpage *subpage = NULL;
|
||||
int ret;
|
||||
struct btrfs_subpage *subpage;
|
||||
|
||||
/*
|
||||
* We have cases like a dummy extent buffer page, which is not mappped
|
||||
|
@ -75,13 +74,15 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
|
|||
*/
|
||||
if (page->mapping)
|
||||
ASSERT(PageLocked(page));
|
||||
|
||||
/* Either not subpage, or the page already has private attached */
|
||||
if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
|
||||
return 0;
|
||||
|
||||
ret = btrfs_alloc_subpage(fs_info, &subpage, type);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
subpage = btrfs_alloc_subpage(fs_info, type);
|
||||
if (IS_ERR(subpage))
|
||||
return PTR_ERR(subpage);
|
||||
|
||||
attach_page_private(page, subpage);
|
||||
return 0;
|
||||
}
|
||||
|
@ -100,23 +101,25 @@ void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
|
|||
btrfs_free_subpage(subpage);
|
||||
}
|
||||
|
||||
int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_subpage **ret,
|
||||
enum btrfs_subpage_type type)
|
||||
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
|
||||
enum btrfs_subpage_type type)
|
||||
{
|
||||
struct btrfs_subpage *ret;
|
||||
|
||||
ASSERT(fs_info->sectorsize < PAGE_SIZE);
|
||||
|
||||
*ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
|
||||
if (!*ret)
|
||||
return -ENOMEM;
|
||||
spin_lock_init(&(*ret)->lock);
|
||||
ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
|
||||
if (!ret)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spin_lock_init(&ret->lock);
|
||||
if (type == BTRFS_SUBPAGE_METADATA) {
|
||||
atomic_set(&(*ret)->eb_refs, 0);
|
||||
atomic_set(&ret->eb_refs, 0);
|
||||
} else {
|
||||
atomic_set(&(*ret)->readers, 0);
|
||||
atomic_set(&(*ret)->writers, 0);
|
||||
atomic_set(&ret->readers, 0);
|
||||
atomic_set(&ret->writers, 0);
|
||||
}
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void btrfs_free_subpage(struct btrfs_subpage *subpage)
|
||||
|
|
|
@ -59,9 +59,8 @@ void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
|
|||
struct page *page);
|
||||
|
||||
/* Allocate additional data where page represents more than one sector */
|
||||
int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_subpage **ret,
|
||||
enum btrfs_subpage_type type);
|
||||
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
|
||||
enum btrfs_subpage_type type);
|
||||
void btrfs_free_subpage(struct btrfs_subpage *subpage);
|
||||
|
||||
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
|
||||
|
|
Loading…
Open link in a new issue