Merge tag 'for-f2fs-3.17-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs bug fixes from Jaegeuk Kim:
 "This series includes patches to:

   - fix recovery routines
   - fix bugs related to inline_data/xattr
   - fix when casting the dentry names
   - handle EIO or ENOMEM correctly
   - fix memory leak
   - fix lock coverage"

* tag 'for-f2fs-3.17-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (28 commits)
  f2fs: reposition unlock_new_inode to prevent accessing invalid inode
  f2fs: fix wrong casting for dentry name
  f2fs: simplify by using a literal
  f2fs: truncate stale block for inline_data
  f2fs: use macro for code readability
  f2fs: introduce need_do_checkpoint for readability
  f2fs: fix incorrect calculation with total/free inode num
  f2fs: remove rename and use rename2
  f2fs: skip if inline_data was converted already
  f2fs: remove rewrite_node_page
  f2fs: avoid double lock in truncate_blocks
  f2fs: prevent checkpoint during roll-forward
  f2fs: add WARN_ON in f2fs_bug_on
  f2fs: handle EIO not to break fs consistency
  f2fs: check s_dirty under cp_mutex
  f2fs: unlock_page when node page is redirtied out
  f2fs: introduce f2fs_cp_error for readability
  f2fs: give a chance to mount again when encountering errors
  f2fs: trigger release_dirty_inode in f2fs_put_super
  f2fs: don't skip checkpoint if there is no dirty node pages
  ...
commit 70c8038dd6
@@ -23,7 +23,7 @@ config F2FS_STAT_FS
 mounted as f2fs. Each file shows the whole f2fs information.

 /sys/kernel/debug/f2fs/status includes:
-- major file system information managed by f2fs currently
+- major filesystem information managed by f2fs currently
 - average SIT information about whole segments
 - current memory footprint consumed by f2fs.

@@ -68,6 +68,6 @@ config F2FS_CHECK_FS
 bool "F2FS consistency checking feature"
 depends on F2FS_FS
 help
-Enables BUG_ONs which check the file system consistency in runtime.
+Enables BUG_ONs which check the filesystem consistency in runtime.

 If you want to improve the performance, say N.
@@ -160,14 +160,11 @@ static int f2fs_write_meta_page(struct page *page,
 goto redirty_out;
 if (wbc->for_reclaim)
 goto redirty_out;
-
-/* Should not write any meta pages, if any IO error was occurred */
-if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
-goto no_write;
+if (unlikely(f2fs_cp_error(sbi)))
+goto redirty_out;

 f2fs_wait_on_page_writeback(page, META);
 write_meta_page(sbi, page);
-no_write:
 dec_page_count(sbi, F2FS_DIRTY_META);
 unlock_page(page);
 return 0;
@@ -348,7 +345,7 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
 return e ? true : false;
 }

-static void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_dirty_inode(struct f2fs_sb_info *sbi)
 {
 struct ino_entry *e, *tmp;
 int i;
@@ -446,8 +443,8 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 struct f2fs_orphan_block *orphan_blk = NULL;
 unsigned int nentries = 0;
 unsigned short index;
-unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
-(F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+unsigned short orphan_blocks =
+(unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
 struct page *page = NULL;
 struct ino_entry *orphan = NULL;

@@ -737,7 +734,7 @@ retry:
 /*
 * Freeze all the FS-operations for checkpoint.
 */
-static void block_operations(struct f2fs_sb_info *sbi)
+static int block_operations(struct f2fs_sb_info *sbi)
 {
 struct writeback_control wbc = {
 .sync_mode = WB_SYNC_ALL,
@@ -745,6 +742,7 @@ static void block_operations(struct f2fs_sb_info *sbi)
 .for_reclaim = 0,
 };
 struct blk_plug plug;
+int err = 0;

 blk_start_plug(&plug);

@@ -754,11 +752,15 @@ retry_flush_dents:
 if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
 f2fs_unlock_all(sbi);
 sync_dirty_dir_inodes(sbi);
+if (unlikely(f2fs_cp_error(sbi))) {
+err = -EIO;
+goto out;
+}
 goto retry_flush_dents;
 }

 /*
-* POR: we should ensure that there is no dirty node pages
+* POR: we should ensure that there are no dirty node pages
 * until finishing nat/sit flush.
 */
 retry_flush_nodes:
@@ -767,9 +769,16 @@ retry_flush_nodes:
 if (get_pages(sbi, F2FS_DIRTY_NODES)) {
 up_write(&sbi->node_write);
 sync_node_pages(sbi, 0, &wbc);
+if (unlikely(f2fs_cp_error(sbi))) {
+f2fs_unlock_all(sbi);
+err = -EIO;
+goto out;
+}
 goto retry_flush_nodes;
 }
+out:
 blk_finish_plug(&plug);
+return err;
 }

 static void unblock_operations(struct f2fs_sb_info *sbi)
@@ -813,8 +822,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));

 /* Flush all the NAT/SIT pages */
-while (get_pages(sbi, F2FS_DIRTY_META))
+while (get_pages(sbi, F2FS_DIRTY_META)) {
 sync_meta_pages(sbi, META, LONG_MAX);
+if (unlikely(f2fs_cp_error(sbi)))
+return;
+}

 next_free_nid(sbi, &last_nid);

@@ -825,7 +837,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
-for (i = 0; i < 3; i++) {
+for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
 ckpt->cur_node_segno[i] =
 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
 ckpt->cur_node_blkoff[i] =
@@ -833,7 +845,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 ckpt->alloc_type[i + CURSEG_HOT_NODE] =
 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
 }
-for (i = 0; i < 3; i++) {
+for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
 ckpt->cur_data_segno[i] =
 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
 ckpt->cur_data_blkoff[i] =
@@ -848,24 +860,23 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)

 /* 2 cp + n data seg summary + orphan inode blocks */
 data_sum_blocks = npages_for_summary_flush(sbi);
-if (data_sum_blocks < 3)
+if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
 set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
 else
 clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

-orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
-/ F2FS_ORPHANS_PER_BLOCK;
+orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
 ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
 orphan_blocks);

 if (is_umount) {
 set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
 cp_payload_blks + data_sum_blocks +
 orphan_blocks + NR_CURSEG_NODE_TYPE);
 } else {
 clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
 cp_payload_blks + data_sum_blocks +
 orphan_blocks);
 }
@@ -924,6 +935,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 /* wait for previous submitted node/meta pages writeback */
 wait_on_all_pages_writeback(sbi);

+if (unlikely(f2fs_cp_error(sbi)))
+return;
+
 filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
 filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

@@ -934,15 +948,17 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 /* Here, we only have one bio having CP pack */
 sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

-if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
-clear_prefree_segments(sbi);
 release_dirty_inode(sbi);
+
+if (unlikely(f2fs_cp_error(sbi)))
+return;
+
+clear_prefree_segments(sbi);
 F2FS_RESET_SB_DIRT(sbi);
-}
 }

 /*
-* We guarantee that this checkpoint procedure should not fail.
+* We guarantee that this checkpoint procedure will not fail.
 */
 void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
@@ -952,7 +968,13 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");

 mutex_lock(&sbi->cp_mutex);
-block_operations(sbi);
+
+if (!sbi->s_dirty)
+goto out;
+if (unlikely(f2fs_cp_error(sbi)))
+goto out;
+if (block_operations(sbi))
+goto out;

 trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");

@@ -976,9 +998,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 do_checkpoint(sbi, is_umount);

 unblock_operations(sbi);
-mutex_unlock(&sbi->cp_mutex);
-
 stat_inc_cp_count(sbi->stat_info);
+out:
+mutex_unlock(&sbi->cp_mutex);
 trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
 }

@@ -999,8 +1021,8 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi)
 * for cp pack we can have max 1020*504 orphan entries
 */
 sbi->n_orphans = 0;
-sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
-* F2FS_ORPHANS_PER_BLOCK;
+sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
 }

 int __init create_checkpoint_caches(void)
@@ -53,7 +53,7 @@ static void f2fs_write_end_io(struct bio *bio, int err)
 struct page *page = bvec->bv_page;

 if (unlikely(err)) {
-SetPageError(page);
+set_page_dirty(page);
 set_bit(AS_EIO, &page->mapping->flags);
 f2fs_stop_checkpoint(sbi);
 }
@@ -691,7 +691,7 @@ get_next:
 allocated = true;
 blkaddr = dn.data_blkaddr;
 }
-/* Give more consecutive addresses for the read ahead */
+/* Give more consecutive addresses for the readahead */
 if (blkaddr == (bh_result->b_blocknr + ofs)) {
 ofs++;
 dn.ofs_in_node++;
@@ -739,7 +739,7 @@ static int f2fs_read_data_page(struct file *file, struct page *page)

 trace_f2fs_readpage(page, DATA);

-/* If the file has inline data, try to read it directlly */
+/* If the file has inline data, try to read it directly */
 if (f2fs_has_inline_data(inode))
 ret = f2fs_read_inline_data(inode, page);
 else
@@ -836,10 +836,19 @@ write:

 /* Dentry blocks are controlled by checkpoint */
 if (S_ISDIR(inode->i_mode)) {
+if (unlikely(f2fs_cp_error(sbi)))
+goto redirty_out;
 err = do_write_data_page(page, &fio);
 goto done;
 }

+/* we should bypass data pages to proceed the kworkder jobs */
+if (unlikely(f2fs_cp_error(sbi))) {
+SetPageError(page);
+unlock_page(page);
+return 0;
+}
+
 if (!wbc->for_reclaim)
 need_balance_fs = true;
 else if (has_not_enough_free_secs(sbi, 0))
@@ -927,7 +936,7 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)

 if (to > inode->i_size) {
 truncate_pagecache(inode, inode->i_size);
-truncate_blocks(inode, inode->i_size);
+truncate_blocks(inode, inode->i_size, true);
 }
 }

@@ -946,7 +955,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,

 f2fs_balance_fs(sbi);
 repeat:
-err = f2fs_convert_inline_data(inode, pos + len);
+err = f2fs_convert_inline_data(inode, pos + len, NULL);
 if (err)
 goto fail;

@@ -32,7 +32,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 struct f2fs_stat_info *si = F2FS_STAT(sbi);
 int i;

-/* valid check of the segment numbers */
+/* validation check of the segment numbers */
 si->hit_ext = sbi->read_hit_ext;
 si->total_ext = sbi->total_hit_ext;
 si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -152,7 +152,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
 si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));

-/* buld nm */
+/* build nm */
 si->base_mem += sizeof(struct f2fs_nm_info);
 si->base_mem += __bitmap_size(sbi, NAT_BITMAP);

@@ -124,7 +124,7 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,

 /*
 * For the most part, it should be a bug when name_len is zero.
-* We stop here for figuring out where the bugs are occurred.
+* We stop here for figuring out where the bugs has occurred.
 */
 f2fs_bug_on(!de->name_len);

@@ -391,7 +391,7 @@ put_error:
 error:
 /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
 truncate_inode_pages(&inode->i_data, 0);
-truncate_blocks(inode, 0);
+truncate_blocks(inode, 0, false);
 remove_dirty_dir_inode(inode);
 remove_inode_page(inode);
 return ERR_PTR(err);
@@ -563,7 +563,7 @@ fail:
 }

 /*
-* It only removes the dentry from the dentry page,corresponding name
+* It only removes the dentry from the dentry page, corresponding name
 * entry in name page does not need to be touched during deletion.
 */
 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
@@ -24,7 +24,7 @@
 #define f2fs_bug_on(condition) BUG_ON(condition)
 #define f2fs_down_write(x, y) down_write_nest_lock(x, y)
 #else
-#define f2fs_bug_on(condition)
+#define f2fs_bug_on(condition) WARN_ON(condition)
 #define f2fs_down_write(x, y) down_write(x)
 #endif

@@ -395,7 +395,7 @@ enum count_type {
 };

 /*
-* The below are the page types of bios used in submti_bio().
+* The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA User data pages. It operates as async mode.
 * NODE Node pages. It operates as async mode.
@@ -470,7 +470,7 @@ struct f2fs_sb_info {
 struct list_head dir_inode_list; /* dir inode list */
 spinlock_t dir_inode_lock; /* for dir inode list lock */

-/* basic file system units */
+/* basic filesystem units */
 unsigned int log_sectors_per_block; /* log2 sectors per block */
 unsigned int log_blocksize; /* log2 block size */
 unsigned int blocksize; /* block size */
@@ -799,7 +799,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)

 /*
 * odd numbered checkpoint should at cp segment 0
-* and even segent must be at cp segment 1
+* and even segment must be at cp segment 1
 */
 if (!(ckpt_version & 1))
 start_addr += sbi->blocks_per_seg;
@@ -1096,6 +1096,11 @@ static inline int f2fs_readonly(struct super_block *sb)
 return sb->s_flags & MS_RDONLY;
 }

+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+{
+return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+}
+
 static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
 {
 set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
@@ -1117,7 +1122,7 @@ static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
 */
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
-int truncate_blocks(struct inode *, u64);
+int truncate_blocks(struct inode *, u64, bool);
 void f2fs_truncate(struct inode *);
 int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
@@ -1202,10 +1207,8 @@ int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
-void recover_node_page(struct f2fs_sb_info *, struct page *,
-struct f2fs_summary *, struct node_info *, block_t);
 void recover_inline_xattr(struct inode *, struct page *);
-bool recover_xattr_data(struct inode *, struct page *, block_t);
+void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
 int restore_node_summary(struct f2fs_sb_info *, unsigned int,
 struct f2fs_summary_block *);
@@ -1238,8 +1241,6 @@ void write_data_page(struct page *, struct dnode_of_data *, block_t *,
 void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
 void recover_data_page(struct f2fs_sb_info *, struct page *,
 struct f2fs_summary *, block_t, block_t);
-void rewrite_node_page(struct f2fs_sb_info *, struct page *,
-struct f2fs_summary *, block_t, block_t);
 void allocate_data_block(struct f2fs_sb_info *, struct page *,
 block_t, block_t *, struct f2fs_summary *, int);
 void f2fs_wait_on_page_writeback(struct page *, enum page_type);
@@ -1262,6 +1263,7 @@ int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
 void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
 void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
+void release_dirty_inode(struct f2fs_sb_info *);
 bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
 int acquire_orphan_inode(struct f2fs_sb_info *);
 void release_orphan_inode(struct f2fs_sb_info *);
@@ -1439,8 +1441,8 @@ extern const struct inode_operations f2fs_special_inode_operations;
 */
 bool f2fs_may_inline(struct inode *);
 int f2fs_read_inline_data(struct inode *, struct page *);
-int f2fs_convert_inline_data(struct inode *, pgoff_t);
+int f2fs_convert_inline_data(struct inode *, pgoff_t, struct page *);
 int f2fs_write_inline_data(struct inode *, struct page *, unsigned int);
 void truncate_inline_data(struct inode *, u64);
-int recover_inline_data(struct inode *, struct page *);
+bool recover_inline_data(struct inode *, struct page *);
 #endif
@@ -41,6 +41,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,

 sb_start_pagefault(inode->i_sb);

+/* force to convert with normal data indices */
+err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page);
+if (err)
+goto out;
+
 /* block allocation */
 f2fs_lock_op(sbi);
 set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -110,6 +115,25 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
 return 1;
 }

+static inline bool need_do_checkpoint(struct inode *inode)
+{
+struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+bool need_cp = false;
+
+if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
+need_cp = true;
+else if (file_wrong_pino(inode))
+need_cp = true;
+else if (!space_for_roll_forward(sbi))
+need_cp = true;
+else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
+need_cp = true;
+else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
+need_cp = true;
+
+return need_cp;
+}
+
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
 struct inode *inode = file->f_mapping->host;
@@ -154,23 +178,12 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 /* guarantee free sections for fsync */
 f2fs_balance_fs(sbi);

-down_read(&fi->i_sem);
-
 /*
 * Both of fdatasync() and fsync() are able to be recovered from
 * sudden-power-off.
 */
-if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
-need_cp = true;
-else if (file_wrong_pino(inode))
-need_cp = true;
-else if (!space_for_roll_forward(sbi))
-need_cp = true;
-else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
-need_cp = true;
-else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
-need_cp = true;
-
+down_read(&fi->i_sem);
+need_cp = need_do_checkpoint(inode);
 up_read(&fi->i_sem);

 if (need_cp) {
@@ -288,7 +301,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 if (err && err != -ENOENT) {
 goto fail;
 } else if (err == -ENOENT) {
-/* direct node is not exist */
+/* direct node does not exists */
 if (whence == SEEK_DATA) {
 pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
 F2FS_I(inode));
@@ -417,7 +430,7 @@ out:
 f2fs_put_page(page, 1);
 }

-int truncate_blocks(struct inode *inode, u64 from)
+int truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 unsigned int blocksize = inode->i_sb->s_blocksize;
@@ -433,6 +446,7 @@ int truncate_blocks(struct inode *inode, u64 from)
 free_from = (pgoff_t)
 ((from + blocksize - 1) >> (sbi->log_blocksize));

+if (lock)
 f2fs_lock_op(sbi);

 set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -440,6 +454,7 @@ int truncate_blocks(struct inode *inode, u64 from)
 if (err) {
 if (err == -ENOENT)
 goto free_next;
+if (lock)
 f2fs_unlock_op(sbi);
 trace_f2fs_truncate_blocks_exit(inode, err);
 return err;
@@ -458,6 +473,7 @@ int truncate_blocks(struct inode *inode, u64 from)
 f2fs_put_dnode(&dn);
 free_next:
 err = truncate_inode_blocks(inode, free_from);
+if (lock)
 f2fs_unlock_op(sbi);
 done:
 /* lastly zero out the first data page */
@@ -475,7 +491,7 @@ void f2fs_truncate(struct inode *inode)

 trace_f2fs_truncate(inode);

-if (!truncate_blocks(inode, i_size_read(inode))) {
+if (!truncate_blocks(inode, i_size_read(inode), true)) {
 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 mark_inode_dirty(inode);
 }
@@ -533,7 +549,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)

 if ((attr->ia_valid & ATTR_SIZE) &&
 attr->ia_size != i_size_read(inode)) {
-err = f2fs_convert_inline_data(inode, attr->ia_size);
+err = f2fs_convert_inline_data(inode, attr->ia_size, NULL);
 if (err)
 return err;

@@ -622,7 +638,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 loff_t off_start, off_end;
 int ret = 0;

-ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1);
+ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL);
 if (ret)
 return ret;

@@ -678,7 +694,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 if (ret)
 return ret;

-ret = f2fs_convert_inline_data(inode, offset + len);
+ret = f2fs_convert_inline_data(inode, offset + len, NULL);
 if (ret)
 return ret;

@@ -58,7 +58,7 @@ static int gc_thread_func(void *data)
 * 3. IO subsystem is idle by checking the # of requests in
 * bdev's request list.
 *
-* Note) We have to avoid triggering GCs too much frequently.
+* Note) We have to avoid triggering GCs frequently.
 * Because it is possible that some segments can be
 * invalidated soon after by user update or deletion.
 * So, I'd like to wait some time to collect dirty segments.
@@ -222,7 +222,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 u = (vblocks * 100) >> sbi->log_blocks_per_seg;

-/* Handle if the system time is changed by user */
+/* Handle if the system time has changed by the user */
 if (mtime < sit_i->min_mtime)
 sit_i->min_mtime = mtime;
 if (mtime > sit_i->max_mtime)
@@ -593,7 +593,7 @@ next_step:

 if (phase == 2) {
 inode = f2fs_iget(sb, dni.ino);
-if (IS_ERR(inode))
+if (IS_ERR(inode) || is_bad_inode(inode))
 continue;

 start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
@@ -693,7 +693,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
 gc_more:
 if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
 goto stop;
-if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+if (unlikely(f2fs_cp_error(sbi)))
 goto stop;

 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
@@ -91,7 +91,7 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
 block_t invalid_user_blocks = sbi->user_block_count -
 written_block_count(sbi);
 /*
-* Background GC is triggered with the following condition.
+* Background GC is triggered with the following conditions.
 * 1. There are a number of invalid blocks.
 * 2. There is not enough free space.
 */
@@ -42,7 +42,8 @@ static void TEA_transform(unsigned int buf[4], unsigned int const in[])
 buf[1] += b1;
 }

-static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num)
+static void str2hashbuf(const unsigned char *msg, size_t len,
+unsigned int *buf, int num)
 {
 unsigned pad, val;
 int i;
@@ -73,9 +74,9 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
 {
 __u32 hash;
 f2fs_hash_t f2fs_hash;
-const char *p;
+const unsigned char *p;
 __u32 in[8], buf[4];
-const char *name = name_info->name;
+const unsigned char *name = name_info->name;
 size_t len = name_info->len;

 if ((len <= 2) && (name[0] == '.') &&
@@ -68,7 +68,7 @@ out:

 static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
 {
-int err;
+int err = 0;
 struct page *ipage;
 struct dnode_of_data dn;
 void *src_addr, *dst_addr;
@@ -86,6 +86,10 @@ static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
 goto out;
 }

+/* someone else converted inline_data already */
+if (!f2fs_has_inline_data(inode))
+goto out;
+
 /*
 * i_addr[0] is not used for inline data,
 * so reserving new block will not destroy inline data
@@ -124,9 +128,10 @@ out:
 return err;
 }

-int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
+int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size,
+struct page *page)
 {
-struct page *page;
+struct page *new_page = page;
 int err;

 if (!f2fs_has_inline_data(inode))
@@ -134,12 +139,15 @@ int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
 else if (to_size <= MAX_INLINE_DATA)
 return 0;

-page = grab_cache_page(inode->i_mapping, 0);
-if (!page)
+if (!page || page->index != 0) {
+new_page = grab_cache_page(inode->i_mapping, 0);
+if (!new_page)
 return -ENOMEM;
+}

-err = __f2fs_convert_inline_data(inode, page);
-f2fs_put_page(page, 1);
+err = __f2fs_convert_inline_data(inode, new_page);
+if (!page || page->index != 0)
+f2fs_put_page(new_page, 1);
 return err;
 }

@@ -199,7 +207,7 @@ void truncate_inline_data(struct inode *inode, u64 from)
 f2fs_put_page(ipage, 1);
 }

-int recover_inline_data(struct inode *inode, struct page *npage)
+bool recover_inline_data(struct inode *inode, struct page *npage)
 {
 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 struct f2fs_inode *ri = NULL;
@@ -218,7 +226,7 @@ int recover_inline_data(struct inode *inode, struct page *npage)
 ri = F2FS_INODE(npage);

 if (f2fs_has_inline_data(inode) &&
-ri && ri->i_inline & F2FS_INLINE_DATA) {
+ri && (ri->i_inline & F2FS_INLINE_DATA)) {
 process_inline:
 ipage = get_node_page(sbi, inode->i_ino);
 f2fs_bug_on(IS_ERR(ipage));
@@ -230,7 +238,7 @@ process_inline:
 memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
 update_inode(inode, ipage);
 f2fs_put_page(ipage, 1);
-return -1;
+return true;
 }

 if (f2fs_has_inline_data(inode)) {
@@ -242,10 +250,10 @@ process_inline:
 clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
 update_inode(inode, ipage);
 f2fs_put_page(ipage, 1);
-} else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
-truncate_blocks(inode, 0);
+} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+truncate_blocks(inode, 0, false);
 set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
 goto process_inline;
 }
-return 0;
+return false;
 }
@@ -134,9 +134,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 return 0;
 out:
 clear_nlink(inode);
-unlock_new_inode(inode);
-make_bad_inode(inode);
-iput(inode);
+iget_failed(inode);
 alloc_nid_failed(sbi, ino);
 return err;
 }
@@ -267,9 +265,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
 return err;
 out:
 clear_nlink(inode);
-unlock_new_inode(inode);
-make_bad_inode(inode);
-iput(inode);
+iget_failed(inode);
 alloc_nid_failed(sbi, inode->i_ino);
 return err;
 }
@@ -308,9 +304,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 out_fail:
 clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
 clear_nlink(inode);
-unlock_new_inode(inode);
-make_bad_inode(inode);
-iput(inode);
+iget_failed(inode);
 alloc_nid_failed(sbi, inode->i_ino);
 return err;
 }
@@ -354,9 +348,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
 return 0;
 out:
 clear_nlink(inode);
-unlock_new_inode(inode);
-make_bad_inode(inode);
-iput(inode);
+iget_failed(inode);
 alloc_nid_failed(sbi, inode->i_ino);
 return err;
 }
@@ -688,9 +680,7 @@ release_out:
 out:
 f2fs_unlock_op(sbi);
 clear_nlink(inode);
-unlock_new_inode(inode);
-make_bad_inode(inode);
-iput(inode);
+iget_failed(inode);
 alloc_nid_failed(sbi, inode->i_ino);
 return err;
 }
@@ -704,7 +694,6 @@ const struct inode_operations f2fs_dir_inode_operations = {
 .mkdir = f2fs_mkdir,
 .rmdir = f2fs_rmdir,
 .mknod = f2fs_mknod,
-.rename = f2fs_rename,
 .rename2 = f2fs_rename2,
 .tmpfile = f2fs_tmpfile,
 .getattr = f2fs_getattr,
@@ -237,7 +237,7 @@ retry:
 nat_get_blkaddr(e) != NULL_ADDR &&
 new_blkaddr == NEW_ADDR);

-/* increament version no as node is removed */
+/* increment version no as node is removed */
 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 unsigned char version = nat_get_version(e);
 nat_set_version(e, inc_node_version(version));
@@ -274,7 +274,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 }

 /*
-* This function returns always success
+* This function always returns success
 */
 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 {
@@ -650,7 +650,7 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,

 /* get indirect nodes in the path */
 for (i = 0; i < idx + 1; i++) {
-/* refernece count'll be increased */
+/* reference count'll be increased */
 pages[i] = get_node_page(sbi, nid[i]);
 if (IS_ERR(pages[i])) {
 err = PTR_ERR(pages[i]);
@@ -823,22 +823,26 @@ int truncate_xattr_node(struct inode *inode, struct page *page)
 */
 void remove_inode_page(struct inode *inode)
 {
-struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-struct page *page;
-nid_t ino = inode->i_ino;
 struct dnode_of_data dn;

-page = get_node_page(sbi, ino);
-if (IS_ERR(page))
+set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
+if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
 return;

-if (truncate_xattr_node(inode, page)) {
-f2fs_put_page(page, 1);
+if (truncate_xattr_node(inode, dn.inode_page)) {
+f2fs_put_dnode(&dn);
 return;
 }
-/* 0 is possible, after f2fs_new_inode() is failed */
+
+/* remove potential inline_data blocks */
+if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+S_ISLNK(inode->i_mode))
+truncate_data_blocks_range(&dn, 1);
+
+/* 0 is possible, after f2fs_new_inode() has failed */
 f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
-set_new_dnode(&dn, inode, page, page, ino);
+
+/* will put inode & node pages */
 truncate_node(&dn);
 }

@@ -1129,7 +1133,10 @@ continue_unlock:
 set_fsync_mark(page, 0);
 set_dentry_mark(page, 0);
 }
-NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
+
+if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
+unlock_page(page);
+else
 wrote++;

 if (--wbc->nr_to_write == 0)
@@ -1212,6 +1219,8 @@ static int f2fs_write_node_page(struct page *page,

 if (unlikely(sbi->por_doing))
 goto redirty_out;
+if (unlikely(f2fs_cp_error(sbi)))
+goto redirty_out;

 f2fs_wait_on_page_writeback(page, NODE);

@@ -1540,15 +1549,6 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 kmem_cache_free(free_nid_slab, i);
 }

-void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
-struct f2fs_summary *sum, struct node_info *ni,
-block_t new_blkaddr)
-{
-rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
-set_node_addr(sbi, ni, new_blkaddr, false);
-clear_node_page_dirty(page);
-}
-
 void recover_inline_xattr(struct inode *inode, struct page *page)
 {
 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -1557,40 +1557,33 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
 struct page *ipage;
 struct f2fs_inode *ri;

-if (!f2fs_has_inline_xattr(inode))
-return;
-
-if (!IS_INODE(page))
-return;
-
-ri = F2FS_INODE(page);
-if (!(ri->i_inline & F2FS_INLINE_XATTR))
-return;
-
 ipage = get_node_page(sbi, inode->i_ino);
 f2fs_bug_on(IS_ERR(ipage));

+ri = F2FS_INODE(page);
+if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
+clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+goto update_inode;
+}
+
 dst_addr = inline_xattr_addr(ipage);
 src_addr = inline_xattr_addr(page);
 inline_size = inline_xattr_size(inode);

 f2fs_wait_on_page_writeback(ipage, NODE);
 memcpy(dst_addr, src_addr, inline_size);
-
+update_inode:
 update_inode(inode, ipage);
 f2fs_put_page(ipage, 1);
 }

-bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
 {
 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
 nid_t new_xnid = nid_of_node(page);
 struct node_info ni;

-if (!f2fs_has_xattr_block(ofs_of_node(page)))
-return false;
-
 /* 1: invalidate the previous xattr nid */
 if (!prev_xnid)
 goto recover_xnid;
@@ -1618,7 +1611,6 @@ recover_xnid:
 set_node_addr(sbi, &ni, blkaddr, false);

 update_inode_page(inode);
-return true;
 }

 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1651,6 +1643,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 dst->i_blocks = cpu_to_le64(1);
 dst->i_links = cpu_to_le32(1);
 dst->i_xattr_nid = 0;
+dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

 new_ni = old_ni;
 new_ni.ino = ino;
@@ -1659,13 +1652,14 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 WARN_ON(1);
 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 inc_valid_inode_count(sbi);
+set_page_dirty(ipage);
 f2fs_put_page(ipage, 1);
 return 0;
 }

 /*
 * ra_sum_pages() merge contiguous pages into one bio and submit.
-* these pre-readed pages are alloced in bd_inode's mapping tree.
+* these pre-read pages are allocated in bd_inode's mapping tree.
 */
 static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
 int start, int nrpages)
@@ -1709,7 +1703,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
 nrpages = min(last_offset - i, bio_blocks);

-/* read ahead node pages */
+/* readahead node pages */
 nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
 if (!nrpages)
 return -ENOMEM;
@@ -1967,7 +1961,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

 /* not used nids: 0, node, meta, (and root counted as valid node) */
-nm_i->available_nids = nm_i->max_nid - 3;
+nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
 nm_i->fcnt = 0;
 nm_i->nat_cnt = 0;
 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
@@ -62,8 +62,10 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
         }
 retry:
         de = f2fs_find_entry(dir, &name, &page);
-        if (de && inode->i_ino == le32_to_cpu(de->ino))
+        if (de && inode->i_ino == le32_to_cpu(de->ino)) {
+                clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
                 goto out_unmap_put;
+        }
         if (de) {
                 einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
                 if (IS_ERR(einode)) {
@@ -300,14 +302,19 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
         struct node_info ni;
         int err = 0, recovered = 0;
 
+        /* step 1: recover xattr */
+        if (IS_INODE(page)) {
                 recover_inline_xattr(inode, page);
+        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+                recover_xattr_data(inode, page, blkaddr);
+                goto out;
+        }
 
+        /* step 2: recover inline data */
         if (recover_inline_data(inode, page))
                 goto out;
 
-        if (recover_xattr_data(inode, page, blkaddr))
-                goto out;
-
+        /* step 3: recover data indices */
         start = start_bidx_of_node(ofs_of_node(page), fi);
         end = start + ADDRS_PER_PAGE(page, fi);
 
@@ -364,8 +371,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
         fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                         ofs_of_node(page), false);
         set_page_dirty(dn.node_page);
-
-        recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 err:
         f2fs_put_dnode(&dn);
         f2fs_unlock_op(sbi);
@@ -452,6 +457,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
         /* step #1: find fsynced inode numbers */
         sbi->por_doing = true;
 
+        /* prevent checkpoint */
+        mutex_lock(&sbi->cp_mutex);
+
         blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
         err = find_fsync_dnodes(sbi, &inode_list);
@@ -465,6 +473,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 
         /* step #2: recover data */
         err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+        if (!err)
                 f2fs_bug_on(!list_empty(&inode_list));
 out:
         destroy_fsync_dnodes(&inode_list);
@@ -482,8 +491,13 @@ out:
                 /* Flush all the NAT/SIT pages */
                 while (get_pages(sbi, F2FS_DIRTY_META))
                         sync_meta_pages(sbi, META, LONG_MAX);
+                set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+                mutex_unlock(&sbi->cp_mutex);
         } else if (need_writecp) {
+                mutex_unlock(&sbi->cp_mutex);
                 write_checkpoint(sbi, false);
+        } else {
+                mutex_unlock(&sbi->cp_mutex);
         }
         return err;
 }
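Taken together, the recover_fsync_data() hunks above make roll-forward recovery hold cp_mutex from before find_fsync_dnodes() until the very end, and every exit path releases it exactly once; a checkpoint is written only after the unlock. The sketch below is a condensed view of that control flow, not a compilable copy of the function: local setup, error messages and the need_writecp computation are elided.

/* Condensed flow implied by the hunks above (presumably fs/f2fs/recovery.c). */
static int recover_fsync_data_flow(struct f2fs_sb_info *sbi, bool need_writecp)
{
        struct list_head inode_list;
        int err;

        INIT_LIST_HEAD(&inode_list);
        sbi->por_doing = true;

        /* prevent checkpoint: held across the whole roll-forward */
        mutex_lock(&sbi->cp_mutex);

        err = find_fsync_dnodes(sbi, &inode_list);
        if (!err && !list_empty(&inode_list))
                err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);

        destroy_fsync_dnodes(&inode_list);

        if (err) {
                /* flush dirty meta, then mark the on-disk checkpoint bad */
                while (get_pages(sbi, F2FS_DIRTY_META))
                        sync_meta_pages(sbi, META, LONG_MAX);
                set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
                mutex_unlock(&sbi->cp_mutex);
        } else if (need_writecp) {
                mutex_unlock(&sbi->cp_mutex);
                write_checkpoint(sbi, false);   /* checkpoint only after unlock */
        } else {
                mutex_unlock(&sbi->cp_mutex);
        }
        return err;
}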
@@ -62,7 +62,7 @@ static inline unsigned long __reverse_ffs(unsigned long word)
 }
 
 /*
- * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c becasue
+ * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
  * f2fs_set_bit makes MSB and LSB reversed in a byte.
  * Example:
  *                             LSB <--> MSB
@@ -808,7 +808,7 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
 }
 
 /*
- * This function always allocates a used segment (from dirty seglist) by SSR
+ * This function always allocates a used segment(from dirty seglist) by SSR
  * manner, so it should recover the existing segment information of valid blocks
  */
 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
@@ -1103,55 +1103,6 @@ void recover_data_page(struct f2fs_sb_info *sbi,
         mutex_unlock(&curseg->curseg_mutex);
 }
 
-void rewrite_node_page(struct f2fs_sb_info *sbi,
-                        struct page *page, struct f2fs_summary *sum,
-                        block_t old_blkaddr, block_t new_blkaddr)
-{
-        struct sit_info *sit_i = SIT_I(sbi);
-        int type = CURSEG_WARM_NODE;
-        struct curseg_info *curseg;
-        unsigned int segno, old_cursegno;
-        block_t next_blkaddr = next_blkaddr_of_node(page);
-        unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
-        struct f2fs_io_info fio = {
-                .type = NODE,
-                .rw = WRITE_SYNC,
-        };
-
-        curseg = CURSEG_I(sbi, type);
-
-        mutex_lock(&curseg->curseg_mutex);
-        mutex_lock(&sit_i->sentry_lock);
-
-        segno = GET_SEGNO(sbi, new_blkaddr);
-        old_cursegno = curseg->segno;
-
-        /* change the current segment */
-        if (segno != curseg->segno) {
-                curseg->next_segno = segno;
-                change_curseg(sbi, type, true);
-        }
-        curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
-        __add_sum_entry(sbi, type, sum);
-
-        /* change the current log to the next block addr in advance */
-        if (next_segno != segno) {
-                curseg->next_segno = next_segno;
-                change_curseg(sbi, type, true);
-        }
-        curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, next_blkaddr);
-
-        /* rewrite node page */
-        set_page_writeback(page);
-        f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio);
-        f2fs_submit_merged_bio(sbi, NODE, WRITE);
-        refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
-        locate_dirty_segment(sbi, old_cursegno);
-
-        mutex_unlock(&sit_i->sentry_lock);
-        mutex_unlock(&curseg->curseg_mutex);
-}
-
 static inline bool is_merged_page(struct f2fs_sb_info *sbi,
                                         struct page *page, enum page_type type)
 {
@@ -549,7 +549,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 }
 
 /*
- * Summary block is always treated as invalid block
+ * Summary block is always treated as an invalid block
  */
 static inline void check_block_count(struct f2fs_sb_info *sbi,
                                 int segno, struct f2fs_sit_entry *raw_sit)
@@ -432,9 +432,15 @@ static void f2fs_put_super(struct super_block *sb)
         stop_gc_thread(sbi);
 
         /* We don't need to do checkpoint when it's clean */
-        if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+        if (sbi->s_dirty)
                 write_checkpoint(sbi, true);
 
+        /*
+         * normally superblock is clean, so we need to release this.
+         * In addition, EIO will skip do checkpoint, we need this as well.
+         */
+        release_dirty_inode(sbi);
+
         iput(sbi->node_inode);
         iput(sbi->meta_inode);
 
@@ -457,9 +463,6 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
         trace_f2fs_sync_fs(sb, sync);
 
-        if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
-                return 0;
-
         if (sync) {
                 mutex_lock(&sbi->gc_mutex);
                 write_checkpoint(sbi, false);
@@ -505,8 +508,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
         buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
         buf->f_bavail = user_block_count - valid_user_blocks(sbi);
 
-        buf->f_files = sbi->total_node_count;
-        buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
+        buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+        buf->f_ffree = buf->f_files - valid_inode_count(sbi);
 
         buf->f_namelen = F2FS_NAME_LEN;
         buf->f_fsid.val[0] = (u32)id;
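With the f2fs_statfs() change above, the reserved node IDs no longer count as allocatable inodes, and the free-inode figure is derived from the already reduced f_files rather than the raw node count, so f_ffree can never exceed f_files. A small standalone illustration of the before/after numbers (the totals are made up for the example):

#include <stdio.h>

#define F2FS_RESERVED_NODE_NUM  3       /* nid 0, node inode, meta inode */

int main(void)
{
        unsigned int total_node_count = 1000;   /* example value only */
        unsigned int valid_inode_count = 100;   /* example value only */

        /* old accounting */
        unsigned int old_files = total_node_count;
        unsigned int old_ffree = total_node_count - valid_inode_count;

        /* new accounting: reserved nids excluded, f_ffree derived from f_files */
        unsigned int new_files = total_node_count - F2FS_RESERVED_NODE_NUM;
        unsigned int new_ffree = new_files - valid_inode_count;

        printf("old: f_files=%u f_ffree=%u\n", old_files, old_ffree);   /* 1000 / 900 */
        printf("new: f_files=%u f_ffree=%u\n", new_files, new_ffree);   /* 997 / 897 */
        return 0;
}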
@@ -663,7 +666,7 @@ restore_gc:
         if (need_restart_gc) {
                 if (start_gc_thread(sbi))
                         f2fs_msg(sbi->sb, KERN_WARNING,
-                                "background gc thread is stop");
+                                "background gc thread has stopped");
         } else if (need_stop_gc) {
                 stop_gc_thread(sbi);
         }
@@ -812,7 +815,7 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
         if (unlikely(fsmeta >= total))
                 return 1;
 
-        if (unlikely(is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
+        if (unlikely(f2fs_cp_error(sbi))) {
                 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                 return 1;
         }
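f2fs_cp_error() is the readability helper this series introduces for the CP_ERROR_FLAG test; it also replaces the open-coded check in f2fs_write_meta_page() earlier in this merge. Its definition is not part of this diff, but judging from the line it replaces it is presumably a one-line inline in f2fs.h along these lines:

/* Sketch of the assumed helper; the authoritative definition lives in fs/f2fs/f2fs.h. */
static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
        return is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG);
}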
@@ -899,8 +902,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
         struct buffer_head *raw_super_buf;
         struct inode *root;
         long err = -EINVAL;
+        bool retry = true;
         int i;
 
+try_onemore:
         /* allocate memory for f2fs-specific super block info */
         sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
         if (!sbi)
@@ -1080,9 +1085,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
         /* recover fsynced data */
         if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
                 err = recover_fsync_data(sbi);
-                if (err)
+                if (err) {
                         f2fs_msg(sb, KERN_ERR,
                                 "Cannot recover all fsync data errno=%ld", err);
+                        goto free_kobj;
+                }
         }
 
         /*
@@ -1123,6 +1130,13 @@ free_sb_buf:
         brelse(raw_super_buf);
 free_sbi:
         kfree(sbi);
+
+        /* give only one another chance */
+        if (retry) {
+                retry = 0;
+                shrink_dcache_sb(sb);
+                goto try_onemore;
+        }
         return err;
 }
 
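The bool retry / try_onemore pair above lets f2fs_fill_super() make exactly one more mount attempt after a failure: state from the failed attempt is torn down on the error path, cached dentries are shrunk, and control jumps back to the top. The standalone program below illustrates the same retry-once pattern in plain C; it is not the f2fs code, and do_fill_super() is a made-up stand-in for the real mount work.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the mount work: fails on the first attempt only. */
static int do_fill_super(int attempt)
{
        return attempt == 0 ? -1 : 0;
}

int main(void)
{
        bool retry = true;
        int attempt = 0;
        int err;

try_onemore:
        err = do_fill_super(attempt);
        if (err && retry) {
                retry = false;          /* give only one more chance */
                attempt++;              /* stands in for cleanup + shrink_dcache_sb() */
                goto try_onemore;
        }

        printf("mount %s after %d attempt(s)\n", err ? "failed" : "succeeded", attempt + 1);
        return err ? 1 : 0;
}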
@@ -528,7 +528,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
                 int free;
                 /*
                  * If value is NULL, it is remove operation.
-                 * In case of update operation, we caculate free.
+                 * In case of update operation, we calculate free.
                  */
                 free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
                 if (found)
@@ -24,6 +24,9 @@
 #define NULL_ADDR               ((block_t)0)    /* used as block_t addresses */
 #define NEW_ADDR                ((block_t)-1)   /* used as block_t addresses */
 
+/* 0, 1(node nid), 2(meta nid) are reserved node id */
+#define F2FS_RESERVED_NODE_NUM          3
+
 #define F2FS_ROOT_INO(sbi)      (sbi->root_ino_num)
 #define F2FS_NODE_INO(sbi)      (sbi->node_ino_num)
 #define F2FS_META_INO(sbi)      (sbi->meta_ino_num)
@@ -87,6 +90,8 @@ struct f2fs_super_block {
 #define CP_ORPHAN_PRESENT_FLAG  0x00000002
 #define CP_UMOUNT_FLAG          0x00000001
 
+#define F2FS_CP_PACKS           2       /* # of checkpoint packs */
+
 struct f2fs_checkpoint {
         __le64 checkpoint_ver;          /* checkpoint block version number */
         __le64 user_block_count;        /* # of user blocks */
@@ -123,6 +128,9 @@ struct f2fs_checkpoint {
  */
 #define F2FS_ORPHANS_PER_BLOCK  1020
 
+#define GET_ORPHAN_BLOCKS(n)    ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+                                        F2FS_ORPHANS_PER_BLOCK)
+
 struct f2fs_orphan_block {
         __le32 ino[F2FS_ORPHANS_PER_BLOCK];     /* inode numbers */
         __le32 reserved;        /* reserved */
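GET_ORPHAN_BLOCKS() added above is plain ceiling division: it yields the number of orphan blocks needed to store n orphan inode numbers at F2FS_ORPHANS_PER_BLOCK entries per block. A quick standalone check of the boundary cases:

#include <stdio.h>

#define F2FS_ORPHANS_PER_BLOCK  1020
#define GET_ORPHAN_BLOCKS(n)    ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
                                        F2FS_ORPHANS_PER_BLOCK)

int main(void)
{
        /* 0 orphans -> 0 blocks, 1..1020 -> 1 block, 1021 -> 2 blocks */
        printf("%d %d %d %d\n",
                GET_ORPHAN_BLOCKS(0), GET_ORPHAN_BLOCKS(1),
                GET_ORPHAN_BLOCKS(1020), GET_ORPHAN_BLOCKS(1021));
        return 0;
}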
@@ -144,6 +152,7 @@ struct f2fs_extent {
 #define F2FS_NAME_LEN           255
 #define F2FS_INLINE_XATTR_ADDRS 50      /* 200 bytes for inline xattrs */
 #define DEF_ADDRS_PER_INODE     923     /* Address Pointers in an Inode */
+#define DEF_NIDS_PER_INODE      5       /* Node IDs in an Inode */
 #define ADDRS_PER_INODE(fi)     addrs_per_inode(fi)
 #define ADDRS_PER_BLOCK         1018    /* Address Pointers in a Direct Block */
 #define NIDS_PER_BLOCK          1018    /* Node IDs in an Indirect Block */
@@ -163,8 +172,9 @@ struct f2fs_extent {
 #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
                                                 F2FS_INLINE_XATTR_ADDRS - 1))
 
-#define INLINE_DATA_OFFSET      (PAGE_CACHE_SIZE - sizeof(struct node_footer) \
-                                - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1))
+#define INLINE_DATA_OFFSET      (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
+                                sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
+                                                DEF_NIDS_PER_INODE - 1))
 
 struct f2fs_inode {
         __le16 i_mode;                  /* file mode */
@@ -194,7 +204,7 @@ struct f2fs_inode {
 
         __le32 i_addr[DEF_ADDRS_PER_INODE];     /* Pointers to data blocks */
 
-        __le32 i_nid[5];                /* direct(2), indirect(2),
+        __le32 i_nid[DEF_NIDS_PER_INODE];       /* direct(2), indirect(2),
                                                 double_indirect(1) node id */
 } __packed;
 
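The reworked INLINE_DATA_OFFSET only replaces the literal 5 with the new DEF_NIDS_PER_INODE, so the computed value is unchanged; the arithmetic is small enough to verify by hand. Assuming sizeof(__le32) == 4, the standalone check below reproduces the two sizes involved: the inline-data capacity and the i_addr[]/i_nid[] area that INLINE_DATA_OFFSET subtracts (PAGE_CACHE_SIZE and the node_footer size themselves are not needed for this check).

#include <stdio.h>

#define F2FS_INLINE_XATTR_ADDRS 50
#define DEF_ADDRS_PER_INODE     923
#define DEF_NIDS_PER_INODE      5

int main(void)
{
        /* MAX_INLINE_DATA = sizeof(__le32) * (DEF_ADDRS_PER_INODE -
         *                                     F2FS_INLINE_XATTR_ADDRS - 1)
         *                 = 4 * (923 - 50 - 1) = 3488 bytes of inline data
         */
        int max_inline_data = 4 * (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS - 1);

        /* The i_addr[] + i_nid[] area subtracted by INLINE_DATA_OFFSET,
         * written with the new macro instead of the literal 5:
         * 4 * (923 + 5 - 1) = 3708 bytes below the node footer.
         */
        int addr_area = 4 * (DEF_ADDRS_PER_INODE + DEF_NIDS_PER_INODE - 1);

        printf("MAX_INLINE_DATA=%d addr_area=%d\n", max_inline_data, addr_area);
        return 0;
}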