Btrfs: Add debugging checks to track down corrupted metadata
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Parent: 95819c0573
Commit: a1b32a5932
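
This patch instruments the btree read path to catch metadata corruption closer to its source: printk messages when a tree block fails read validation (including which mirror was tried), a sanity check that a block's recorded bytenr matches the location it was read from, a WARN_ON on failed tree-block reads, an extra uptodate check in the readpage path, a locking rework in set_extent_buffer_dirty, and noinline annotations on many helpers so they show up by name in stack traces.

A minimal user-space sketch of the bytenr self-check idea follows. The struct and helper names are invented for illustration; this is not the kernel code, just the pattern the btree_readpage_end_io_hook hunk below adds.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Hypothetical on-disk header: each btree block records its own
 * logical address (bytenr), so a block that ends up in the wrong
 * place can be detected at read time. */
struct block_header {
	uint64_t bytenr;
};

/* Compare the address stored in the block against the address it
 * was read from, and complain loudly on mismatch. */
static int check_block(const struct block_header *hdr, uint64_t start)
{
	if (hdr->bytenr != start) {
		fprintf(stderr, "bad tree block start %llu %llu\n",
			(unsigned long long)hdr->bytenr,
			(unsigned long long)start);
		return -EIO;
	}
	return 0;
}

int main(void)
{
	struct block_header good = { .bytenr = 16384 };
	struct block_header bad  = { .bytenr = 0 };

	printf("good: %d\n", check_block(&good, 16384)); /* prints 0 */
	printf("bad:  %d\n", check_block(&bad, 16384));  /* prints -EIO */
	return 0;
}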
fs/btrfs/disk-io.c

@@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
 			return ret;
-
+printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)
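When validation fails, btrfs_num_copies() reports how many copies (mirrors) of the block exist and the read is retried against the next mirror; the printk added above records the failing mirror number so corruption can be pinned to a device. A simplified user-space sketch of that retry pattern, with invented stand-in helpers num_copies() and read_and_verify():

#include <stdio.h>
#include <errno.h>

/* Invented stand-ins for btrfs_num_copies() and a validated read. */
static int num_copies(void) { return 2; }

static int read_and_verify(int mirror)
{
	/* Pretend mirror 1 is corrupted and mirror 2 is good. */
	return mirror == 1 ? -EIO : 0;
}

/* Walk the mirrors until one copy passes validation, logging each
 * failure with the mirror number. */
static int read_block(void)
{
	int mirror, ret = -EIO;

	for (mirror = 1; mirror <= num_copies(); mirror++) {
		ret = read_and_verify(mirror);
		if (ret == 0)
			return 0;
		fprintf(stderr,
			"read extent buffer pages failed with ret %d mirror no %d\n",
			ret, mirror);
	}
	return ret;
}

int main(void)
{
	return read_block() ? 1 : 0;
}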
@@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
+printk("bad tree block start %llu %llu\n",
+       (unsigned long long)found_start,
+       (unsigned long long)eb->start);
 		ret = -EIO;
 		goto err;
 	}
@@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 
 	if (ret == 0) {
 		buf->flags |= EXTENT_UPTODATE;
+	} else {
+		WARN_ON(1);
 	}
 	return buf;
 
fs/btrfs/extent_io.c

@@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur);
 		}
 		/* the get_extent function already copied into the page */
 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
 			page_offset += iosize;
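This branch handles the case where get_extent already copied the data into the page, so no I/O is issued; the added check_page_uptodate() call makes sure the page-level uptodate flag still gets set once the whole page is covered. A toy illustration of that "flip the page flag only when the whole page is covered" idea, with an invented single-range model standing in for the real extent state tree:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* Toy model: one page plus a single uptodate byte range tracked
 * alongside it (invented; btrfs tracks ranges in a state tree). */
struct page_state {
	uint64_t uptodate_start;
	uint64_t uptodate_end;   /* exclusive */
	bool     page_uptodate;  /* the page-level flag */
};

/* Set the page-level flag only once the uptodate range covers the
 * entire page. */
static void check_page_uptodate(struct page_state *p)
{
	if (p->uptodate_start == 0 && p->uptodate_end >= PAGE_SIZE)
		p->page_uptodate = true;
}

int main(void)
{
	struct page_state p = { 0, PAGE_SIZE, false };
	check_page_uptodate(&p);
	printf("page uptodate: %d\n", p.page_uptodate); /* prints 1 */
	return 0;
}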
@@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * properly set.  releasepage may drop page->private
 		 * on us if the page isn't already dirty.
 		 */
+		lock_page(page);
 		if (i == 0) {
-			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
-			lock_page(page);
 			set_page_extent_mapped(page);
-			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		if (i == 0)
-			unlock_page(page);
+		set_extent_dirty(tree, page_offset(page),
+				 page_offset(page) + PAGE_CACHE_SIZE -1,
+				 GFP_NOFS);
+		unlock_page(page);
 	}
-	return set_extent_dirty(tree, eb->start,
-				eb->start + eb->len - 1, GFP_NOFS);
+	return 0;
 }
 EXPORT_SYMBOL(set_extent_buffer_dirty);
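This rewrite takes the page lock once around the whole per-page body instead of conditionally, dirties each page's byte range while the lock is still held, and returns 0 rather than dirtying the whole buffer range after the loop. As the comment in the hunk says, releasepage may drop page->private on a page that isn't yet dirty, so the private setup and the dirtying have to happen under one page lock. A toy sketch of that locking pattern (names invented, a pthread mutex standing in for the page lock; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the race the hunk closes: "private" state attached
 * to a page can be dropped by another path unless the lock is held
 * across both the state setup and the dirtying. This is the
 * pattern, not btrfs code. */
struct toy_page {
	pthread_mutex_t lock;
	bool private_set;
	bool dirty;
};

static void set_dirty(struct toy_page *p)
{
	/* Take the lock once, up front, as the patched code does ... */
	pthread_mutex_lock(&p->lock);
	if (!p->private_set)
		p->private_set = true;   /* like set_page_extent_mapped() */
	p->dirty = true;                 /* like __set_page_dirty_nobuffers() */
	/* ... and only unlock after every per-page update is done. */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct toy_page p = { PTHREAD_MUTEX_INITIALIZER, false, false };
	set_dirty(&p);
	printf("private=%d dirty=%d\n", p.private_set, p.dirty);
	return 0;
}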
@@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (all_uptodate) {
 		if (start_i == 0)
 			eb->flags |= EXTENT_UPTODATE;
+		if (ret) {
+			printk("all up to date but ret is %d\n", ret);
+		}
 		goto unlock_exit;
 	}
 
@@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 					 mirror_num);
 			if (err) {
 				ret = err;
+				printk("err %d from __extent_read_full_page\n", ret);
 			}
 		} else {
 			unlock_page(page);
@@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 		submit_one_bio(READ, bio, mirror_num);
 
 	if (ret || !wait) {
+		if (ret)
+			printk("ret %d wait %d returning\n", ret, wait);
 		return ret;
 	}
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
+			printk("page not uptodate after wait_on_page_locked\n");
 			ret = -EIO;
 		}
 	}
fs/btrfs/file.c

@@ -39,9 +39,10 @@
 #include "compat.h"
 
 
-static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
-				struct page **prepared_pages,
-				const char __user * buf)
+static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
+					 int write_bytes,
+					 struct page **prepared_pages,
+					 const char __user * buf)
 {
 	long page_fault = 0;
 	int i;
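This hunk and the remaining hunks in file.c and volumes.c add noinline to helpers on the write and device paths. The usual motivation for this in a debugging patch: GCC freely inlines small static functions, and an inlined helper never appears in an oops backtrace; noinline (the kernel macro wraps GCC's attribute) keeps each helper as a distinct, named stack frame. A minimal example of the attribute itself:

#include <stdio.h>

/* The kernel's "noinline" expands to GCC's noinline attribute,
 * which keeps a function out of its callers so it shows up by name
 * in a backtrace instead of being folded into the caller. */
static __attribute__((noinline)) int checked_add(int a, int b)
{
	return a + b;
}

int main(void)
{
	/* At -O2 a tiny static helper like this would normally vanish
	 * through inlining; noinline preserves the call frame. */
	printf("%d\n", checked_add(2, 3));
	return 0;
}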
@@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
 	return page_fault ? -EFAULT : 0;
 }
 
-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
 	size_t i;
 	for (i = 0; i < num_pages; i++) {
@@ -359,7 +360,7 @@ out_unlock:
 	return err;
 }
 
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
+int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 {
 	struct extent_map *em;
 	struct extent_map *split = NULL;
@@ -515,7 +516,7 @@ out:
  * it is either truncated or split.  Anything entirely inside the range
  * is deleted from the tree.
  */
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
 {
@@ -785,7 +786,7 @@ out:
 /*
  * this gets pages into the page cache and locks them down
  */
-static int prepare_pages(struct btrfs_root *root, struct file *file,
+static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
 			 struct page **pages, size_t num_pages,
 			 loff_t pos, unsigned long first_index,
 			 unsigned long last_index, size_t write_bytes)
fs/btrfs/volumes.c

@@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void)
 	return 0;
 }
 
-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
-					  u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+						   u64 devid, u8 *uuid)
 {
 	struct btrfs_device *dev;
 	struct list_head *cur;
@@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
 	return NULL;
 }
 
-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
 	struct list_head *cur;
 	struct btrfs_fs_devices *fs_devices;
@@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid)
  * the list if the block device is congested.  This way, multiple devices
  * can make progress from a single worker thread.
  */
-int run_scheduled_bios(struct btrfs_device *device)
+static int noinline run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
@@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work)
 	run_scheduled_bios(device);
 }
 
-static int device_list_add(const char *path,
+static noinline int device_list_add(const char *path,
 			   struct btrfs_super_block *disk_super,
 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 {
@@ -480,10 +480,10 @@ error:
  * called very infrequently and that a given device has a small number
  * of extents
  */
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
-				struct btrfs_device *device,
-				struct btrfs_path *path,
-				u64 num_bytes, u64 *start)
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
+					 struct btrfs_device *device,
+					 struct btrfs_path *path,
+					 u64 num_bytes, u64 *start)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
@@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 			   struct btrfs_device *device,
 			   u64 chunk_tree, u64 chunk_objectid,
 			   u64 chunk_offset,
@@ -693,7 +693,8 @@ err:
 	return ret;
 }
 
-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+				    u64 objectid, u64 *offset)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -735,8 +736,8 @@ error:
 	return ret;
 }
 
-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
-			   u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root,
+				    struct btrfs_path *path, u64 *objectid)
 {
 	int ret;
 	struct btrfs_key key;
@@ -1103,8 +1104,8 @@ out_close_bdev:
 	goto out;
 }
 
-int btrfs_update_device(struct btrfs_trans_handle *trans,
-			struct btrfs_device *device)
+int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
+				 struct btrfs_device *device)
 {
 	int ret;
 	struct btrfs_path *path;
@@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
-			       int sub_stripes)
+static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+					int num_stripes, int sub_stripes)
 {
 	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
 		return calc_size;
@@ -2141,8 +2142,9 @@ struct async_sched {
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
-		 int rw, struct bio *bio)
+static int noinline schedule_bio(struct btrfs_root *root,
+				 struct btrfs_device *device,
+				 int rw, struct bio *bio)
 {
 	int should_queue = 1;
 