ext4: refactor code to read the extent tree block

Refactor out the code needed to read the extent tree block into a
single read_extent_tree_block() function.  In addition to simplifying
the code, it also makes sure that we call the ext4_ext_load_extent
tracepoint whenever we need to read an extent tree block from disk.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reviewed-by: Zheng Liu <wenqing.lz@taobao.com>
Author: Theodore Ts'o
Date: 2013-08-16 21:20:41 -04:00
Parent: a361293f5f
Commit: 7d7ea89e75
1 changed file with 43 additions and 54 deletions

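Caller-side summary (not part of the patch itself): read_extent_tree_block() returns the buffer_head on success and an ERR_PTR() value on failure, so each caller replaces the old sb_getblk()/sb_bread() plus ext4_ext_check_block() sequence with a single call and IS_ERR()/PTR_ERR() error handling. A minimal sketch of that pattern follows; example_walk_level(), pblk and depth are illustrative placeholders, not code from the patch.

    /*
     * Illustrative sketch only -- not from the patch.  Shows the caller
     * pattern the refactoring converges on.
     */
    static int example_walk_level(struct inode *inode, ext4_fsblk_t pblk,
                                  int depth)
    {
            struct buffer_head *bh;

            /*
             * Reads and verifies the extent tree block in one step; the
             * ext4_ext_load_extent tracepoint fires inside the helper when
             * the block actually has to be read from disk.
             */
            bh = read_extent_tree_block(inode, pblk, depth);
            if (IS_ERR(bh))
                    return PTR_ERR(bh);     /* -ENOMEM, -EIO, or check failure */

            /* ... walk ext_block_hdr(bh) at this level ... */

            put_bh(bh);
            return 0;
    }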

@@ -464,25 +464,39 @@ int ext4_ext_check_inode(struct inode *inode)
         return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
 }
 
-static int __ext4_ext_check_block(const char *function, unsigned int line,
-                                  struct inode *inode,
-                                  struct ext4_extent_header *eh,
-                                  int depth,
-                                  struct buffer_head *bh)
+static struct buffer_head *
+__read_extent_tree_block(const char *function, unsigned int line,
+                         struct inode *inode, ext4_fsblk_t pblk, int depth)
 {
-        int ret;
+        struct buffer_head *bh;
+        int err;
 
+        bh = sb_getblk(inode->i_sb, pblk);
+        if (unlikely(!bh))
+                return ERR_PTR(-ENOMEM);
+
+        if (!bh_uptodate_or_lock(bh)) {
+                trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
+                err = bh_submit_read(bh);
+                if (err < 0)
+                        goto errout;
+        }
         if (buffer_verified(bh))
-                return 0;
-        ret = ext4_ext_check(inode, eh, depth);
-        if (ret)
-                return ret;
+                return bh;
+        err = __ext4_ext_check(function, line, inode,
+                               ext_block_hdr(bh), depth);
+        if (err)
+                goto errout;
         set_buffer_verified(bh);
-        return ret;
+        return bh;
+errout:
+        put_bh(bh);
+        return ERR_PTR(err);
+
 }
 
-#define ext4_ext_check_block(inode, eh, depth, bh)      \
-        __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
+#define read_extent_tree_block(inode, pblk, depth)      \
+        __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), (depth))
 
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
@@ -748,20 +762,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                 path[ppos].p_depth = i;
                 path[ppos].p_ext = NULL;
 
-                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
-                if (unlikely(!bh)) {
-                        ret = -ENOMEM;
+                bh = read_extent_tree_block(inode, path[ppos].p_block, --i);
+                if (IS_ERR(bh)) {
+                        ret = PTR_ERR(bh);
                         goto err;
                 }
-                if (!bh_uptodate_or_lock(bh)) {
-                        trace_ext4_ext_load_extent(inode, block,
-                                                path[ppos].p_block);
-                        ret = bh_submit_read(bh);
-                        if (ret < 0) {
-                                put_bh(bh);
-                                goto err;
-                        }
-                }
+
                 eh = ext_block_hdr(bh);
                 ppos++;
                 if (unlikely(ppos > depth)) {
@@ -773,11 +779,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                 }
                 path[ppos].p_bh = bh;
                 path[ppos].p_hdr = eh;
-                i--;
-
-                ret = ext4_ext_check_block(inode, eh, i, bh);
-                if (ret < 0)
-                        goto err;
         }
 
         path[ppos].p_depth = i;
@@ -1412,29 +1413,21 @@ got_index:
         ix++;
         block = ext4_idx_pblock(ix);
         while (++depth < path->p_depth) {
-                bh = sb_bread(inode->i_sb, block);
-                if (bh == NULL)
-                        return -EIO;
-                eh = ext_block_hdr(bh);
                 /* subtract from p_depth to get proper eh_depth */
-                if (ext4_ext_check_block(inode, eh,
-                                         path->p_depth - depth, bh)) {
-                        put_bh(bh);
-                        return -EIO;
-                }
+                bh = read_extent_tree_block(inode, block,
+                                            path->p_depth - depth);
+                if (IS_ERR(bh))
+                        return PTR_ERR(bh);
+                eh = ext_block_hdr(bh);
                 ix = EXT_FIRST_INDEX(eh);
                 block = ext4_idx_pblock(ix);
                 put_bh(bh);
         }
 
-        bh = sb_bread(inode->i_sb, block);
-        if (bh == NULL)
-                return -EIO;
+        bh = read_extent_tree_block(inode, block, path->p_depth - depth);
+        if (IS_ERR(bh))
+                return PTR_ERR(bh);
         eh = ext_block_hdr(bh);
-        if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
-                put_bh(bh);
-                return -EIO;
-        }
         ex = EXT_FIRST_EXTENT(eh);
 found_extent:
         *logical = le32_to_cpu(ex->ee_block);
@@ -2829,10 +2822,11 @@ again:
                         ext_debug("move to level %d (block %llu)\n",
                                   i + 1, ext4_idx_pblock(path[i].p_idx));
                         memset(path + i + 1, 0, sizeof(*path));
-                        bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
-                        if (!bh) {
+                        bh = read_extent_tree_block(inode,
+                                ext4_idx_pblock(path[i].p_idx), depth - i - 1);
+                        if (IS_ERR(bh)) {
                                 /* should we reset i_size? */
-                                err = -EIO;
+                                err = PTR_ERR(bh);
                                 break;
                         }
                         /* Yield here to deal with large extent trees.
@@ -2842,11 +2836,6 @@ again:
                                 err = -EIO;
                                 break;
                         }
-                        if (ext4_ext_check_block(inode, ext_block_hdr(bh),
-                                                 depth - i - 1, bh)) {
-                                err = -EIO;
-                                break;
-                        }
                         path[i + 1].p_bh = bh;
 
                         /* save actual number of indexes since this