f2fs: remove unneeded pointer conversion

There are redundant pointer conversions in the following call stack:
 - at position a, the inode is converted to f2fs_inode_info;
 - at position b, the f2fs_inode_info is converted back to the inode.

 - truncate_blocks(inode,..)
  - fi = F2FS_I(inode)		---a
  - ADDRS_PER_PAGE(node_page, fi)
   - addrs_per_inode(fi)
    - inode = &fi->vfs_inode	---b
    - f2fs_has_inline_xattr(inode)
     - fi = F2FS_I(inode)
     - is_inode_flag_set(fi,..)

To avoid this unneeded conversion, change ADDRS_PER_PAGE and
addrs_per_inode to accept an inode pointer as their parameter.
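
For illustration only (not part of the patch), here is a minimal, self-contained sketch of the round trip being removed. The struct layouts and the has_inline_xattr flag are simplified stand-ins, and F2FS_I() is only a container_of-style conversion in the spirit of the kernel helper; the two constants take their values from the f2fs_fs.h hunk below.

#include <stdio.h>
#include <stddef.h>

/* simplified stand-ins for the kernel structures */
struct inode {
        int has_inline_xattr;           /* stand-in for the FI_INLINE_XATTR flag */
};

struct f2fs_inode_info {
        struct inode vfs_inode;         /* embedded VFS inode, as in f2fs */
};

#define DEF_ADDRS_PER_INODE     923     /* value from f2fs_fs.h */
#define F2FS_INLINE_XATTR_ADDRS 50      /* value from f2fs_fs.h */

/* container_of-style conversion, in the spirit of F2FS_I() */
static struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
        return (struct f2fs_inode_info *)((char *)inode -
                        offsetof(struct f2fs_inode_info, vfs_inode));
}

/* before: takes f2fs_inode_info and converts back to the inode (step b) */
static unsigned int addrs_per_inode_old(struct f2fs_inode_info *fi)
{
        struct inode *inode = &fi->vfs_inode;

        if (inode->has_inline_xattr)
                return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
        return DEF_ADDRS_PER_INODE;
}

/* after: takes the inode directly, so no conversion on either side */
static unsigned int addrs_per_inode_new(struct inode *inode)
{
        if (inode->has_inline_xattr)
                return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
        return DEF_ADDRS_PER_INODE;
}

int main(void)
{
        struct f2fs_inode_info fi = { .vfs_inode = { .has_inline_xattr = 1 } };
        struct inode *inode = &fi.vfs_inode;

        /* old call chain: the caller converts inode -> fi (step a) first */
        printf("old: %u\n", addrs_per_inode_old(F2FS_I(inode)));
        /* new call chain: pass the inode straight through */
        printf("new: %u\n", addrs_per_inode_new(inode));
        return 0;
}

With the inode-based signature, callers such as truncate_blocks() simply pass the inode they already hold, which is exactly what the hunks below change.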

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Authored by Chao Yu on 2016-01-26 15:39:35 +08:00; committed by Jaegeuk Kim
Parent: 5b8db7fada
Commit: 81ca7350ce
8 changed files: 31 additions and 36 deletions

fs/f2fs/data.c

@@ -497,7 +497,6 @@ got_it:
 static int __allocate_data_block(struct dnode_of_data *dn)
 {
         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-        struct f2fs_inode_info *fi = F2FS_I(dn->inode);
         struct f2fs_summary sum;
         struct node_info ni;
         int seg = CURSEG_WARM_DATA;
@@ -525,7 +524,7 @@ alloc:
         set_data_blkaddr(dn);
         /* update i_size */
-        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                         dn->ofs_in_node;
         if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
                 i_size_write(dn->inode,
@@ -592,7 +591,7 @@ next_dnode:
                 goto unlock_out;
         }
-        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 next_block:
         blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

fs/f2fs/extent_cache.c

@@ -673,7 +673,6 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 void f2fs_update_extent_cache(struct dnode_of_data *dn)
 {
-        struct f2fs_inode_info *fi = F2FS_I(dn->inode);
         pgoff_t fofs;
         if (!f2fs_may_extent_tree(dn->inode))
@@ -682,7 +681,7 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn)
         f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                         dn->ofs_in_node;
         if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))

fs/f2fs/f2fs.h

@@ -1538,9 +1538,9 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
         return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
 }
-static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+static inline unsigned int addrs_per_inode(struct inode *inode)
 {
-        if (f2fs_has_inline_xattr(&fi->vfs_inode))
+        if (f2fs_has_inline_xattr(inode))
                 return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
         return DEF_ADDRS_PER_INODE;
 }
@@ -1694,10 +1694,10 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
         (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
 /* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, fi)                          \
-        ((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) :  \
-        (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) /       \
-        ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define PGOFS_OF_NEXT_DNODE(pgofs, inode)                               \
+        ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) :    \
+        (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) /            \
+        ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
 /*
  * file.c
@@ -1916,7 +1916,7 @@ int f2fs_release_page(struct page *, gfp_t);
  */
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
+block_t start_bidx_of_node(unsigned int, struct inode *);
 int f2fs_gc(struct f2fs_sb_info *, bool);
 void build_gc_manager(struct f2fs_sb_info *);

fs/f2fs/file.c

@@ -358,15 +358,14 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                 } else if (err == -ENOENT) {
                         /* direct node does not exists */
                         if (whence == SEEK_DATA) {
-                                pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
-                                                        F2FS_I(inode));
+                                pgofs = PGOFS_OF_NEXT_DNODE(pgofs, inode);
                                 continue;
                         } else {
                                 goto found;
                         }
                 }
-                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                 /* find data/hole in dnode block */
                 for (; dn.ofs_in_node < end_offset;
@@ -480,7 +479,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
          * we will invalidate all blkaddr in the whole range.
          */
         fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
-                                        F2FS_I(dn->inode)) + ofs;
+                                        dn->inode) + ofs;
         f2fs_update_extent_cache_range(dn, fofs, 0, len);
         dec_valid_block_count(sbi, dn->inode, nr_free);
         sync_inode_page(dn);
@@ -568,7 +567,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
                 goto out;
         }
-        count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+        count = ADDRS_PER_PAGE(dn.node_page, inode);
         count -= dn.ofs_in_node;
         f2fs_bug_on(sbi, count < 0);
@@ -768,7 +767,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
                 return err;
         }
-        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
         count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
         f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

fs/f2fs/gc.c

@@ -486,7 +486,7 @@ next_step:
  * as indirect or double indirect node blocks, are given, it must be a caller's
  * bug.
  */
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 {
         unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
         unsigned int bidx;
@@ -503,7 +503,7 @@ block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
                 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                 bidx = node_ofs - 5 - dec;
         }
-        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
 }
 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -722,7 +722,7 @@ next_step:
                         continue;
                 }
-                start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+                start_bidx = start_bidx_of_node(nofs, inode);
                 data_page = get_read_data_page(inode,
                                 start_bidx + ofs_in_node, READA, true);
                 if (IS_ERR(data_page)) {
@@ -738,7 +738,7 @@ next_step:
                 /* phase 3 */
                 inode = find_gc_inode(gc_list, dni.ino);
                 if (inode) {
-                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+                        start_bidx = start_bidx_of_node(nofs, inode)
                                                         + ofs_in_node;
                         if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                 move_encrypted_block(inode, start_bidx);

fs/f2fs/node.c

@@ -407,10 +407,10 @@ cache:
  * The maximum depth is four.
  * Offset[0] will have raw inode offset.
  */
-static int get_node_path(struct f2fs_inode_info *fi, long block,
+static int get_node_path(struct inode *inode, long block,
                                 int offset[4], unsigned int noffset[4])
 {
-        const long direct_index = ADDRS_PER_INODE(fi);
+        const long direct_index = ADDRS_PER_INODE(inode);
         const long direct_blks = ADDRS_PER_BLOCK;
         const long dptrs_per_blk = NIDS_PER_BLOCK;
         const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -498,7 +498,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
         int level, i;
         int err = 0;
-        level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
+        level = get_node_path(dn->inode, index, offset, noffset);
         nids[0] = dn->inode->i_ino;
         npage[0] = dn->inode_page;
@@ -792,7 +792,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
         trace_f2fs_truncate_inode_blocks_enter(inode, from);
-        level = get_node_path(F2FS_I(inode), from, offset, noffset);
+        level = get_node_path(inode, from, offset, noffset);
 restart:
         page = get_node_page(sbi, inode->i_ino);
         if (IS_ERR(page)) {

fs/f2fs/recovery.c

@@ -350,8 +350,7 @@ got_it:
                 inode = dn->inode;
         }
-        bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
-                        le16_to_cpu(sum.ofs_in_node);
+        bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
         /*
          * if inode page is locked, unlock temporarily, but its reference
@@ -386,10 +385,9 @@ truncate_out:
 static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                 struct page *page, block_t blkaddr)
 {
-        struct f2fs_inode_info *fi = F2FS_I(inode);
-        unsigned int start, end;
         struct dnode_of_data dn;
         struct node_info ni;
+        unsigned int start, end;
         int err = 0, recovered = 0;
         /* step 1: recover xattr */
@@ -409,8 +407,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                 goto out;
         /* step 3: recover data indices */
-        start = start_bidx_of_node(ofs_of_node(page), fi);
-        end = start + ADDRS_PER_PAGE(page, fi);
+        start = start_bidx_of_node(ofs_of_node(page), inode);
+        end = start + ADDRS_PER_PAGE(page, inode);
         set_new_dnode(&dn, inode, NULL, NULL, 0);

include/linux/f2fs_fs.h

@@ -170,12 +170,12 @@ struct f2fs_extent {
 #define F2FS_INLINE_XATTR_ADDRS 50      /* 200 bytes for inline xattrs */
 #define DEF_ADDRS_PER_INODE     923     /* Address Pointers in an Inode */
 #define DEF_NIDS_PER_INODE      5       /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi)     addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode)  addrs_per_inode(inode)
 #define ADDRS_PER_BLOCK         1018    /* Address Pointers in a Direct Block */
 #define NIDS_PER_BLOCK          1018    /* Node IDs in an Indirect Block */
-#define ADDRS_PER_PAGE(page, fi)        \
-        (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode)     \
+        (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
 #define NODE_DIR1_BLOCK         (DEF_ADDRS_PER_INODE + 1)
 #define NODE_DIR2_BLOCK         (DEF_ADDRS_PER_INODE + 2)