f2fs: refactor resize_fs to avoid meta updates in progress
Sahitya raised an issue:
- prevent meta updates while checkpoint is in progress

allocate_segment_for_resize() can cause metapage updates if it requires
to change the current node/data segments for resizing. Stop these meta
updates when there is a checkpoint already in progress to prevent
inconsistent CP data.

Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Parent: 4fec3fc026
Commit: b4b10061ef
fs/f2fs/checkpoint.c

@@ -1559,7 +1559,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 			return 0;
 		f2fs_warn(sbi, "Start checkpoint disabled!");
 	}
-	mutex_lock(&sbi->cp_mutex);
+	if (cpc->reason != CP_RESIZE)
+		mutex_lock(&sbi->cp_mutex);
 
 	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
 		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1628,7 +1629,8 @@ stop:
 	f2fs_update_time(sbi, CP_TIME);
 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
 out:
-	mutex_unlock(&sbi->cp_mutex);
+	if (cpc->reason != CP_RESIZE)
+		mutex_unlock(&sbi->cp_mutex);
 	return err;
 }
 
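The checkpoint.c hunks make taking cp_mutex conditional on the checkpoint reason: a CP_RESIZE checkpoint is issued from f2fs_resize_fs(), which now holds cp_mutex itself for the whole resize, so re-taking the (non-recursive) mutex inside f2fs_write_checkpoint() would deadlock. Below is a minimal user-space sketch of that pattern, assuming pthread primitives and invented names (write_checkpoint(), resize_fs()); it is an analogy, not the kernel code.

#include <pthread.h>
#include <stdio.h>

/* Invented stand-ins for the checkpoint reason flags. */
enum cp_reason { CP_SYNC = 0x01, CP_RESIZE = 0x80 };

static pthread_mutex_t cp_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Like the patched f2fs_write_checkpoint(): only take cp_mutex when the
 * caller does not already hold it (i.e. for every reason but CP_RESIZE). */
static int write_checkpoint(enum cp_reason reason)
{
	if (reason != CP_RESIZE)
		pthread_mutex_lock(&cp_mutex);

	printf("checkpoint written, reason=0x%x\n", reason);

	if (reason != CP_RESIZE)
		pthread_mutex_unlock(&cp_mutex);
	return 0;
}

/* Like the patched f2fs_resize_fs(): hold cp_mutex across the resize so no
 * other checkpoint can interleave, then issue a CP_RESIZE checkpoint that
 * knows the lock is already held. */
static int resize_fs(void)
{
	int err;

	pthread_mutex_lock(&cp_mutex);
	/* ... metadata updates would happen here ... */
	err = write_checkpoint(CP_RESIZE);
	pthread_mutex_unlock(&cp_mutex);
	return err;
}

int main(void)
{
	write_checkpoint(CP_SYNC);	/* normal path: takes the lock itself */
	return resize_fs();		/* resize path: caller owns the lock */
}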
fs/f2fs/f2fs.h

@@ -194,6 +194,7 @@ enum {
 #define CP_DISCARD	0x00000010
 #define CP_TRIMMED	0x00000020
 #define CP_PAUSE	0x00000040
+#define CP_RESIZE	0x00000080
 
 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
@@ -1471,7 +1472,6 @@ struct f2fs_sb_info {
 	unsigned int segs_per_sec;		/* segments per section */
 	unsigned int secs_per_zone;		/* sections per zone */
 	unsigned int total_sections;		/* total section count */
-	struct mutex resize_mutex;		/* for resize exclusion */
 	unsigned int total_node_count;		/* total node block count */
 	unsigned int total_valid_node_count;	/* valid node block count */
 	loff_t max_file_blocks;			/* max block index of file */
fs/f2fs/file.c

@@ -3312,7 +3312,6 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
 	__u64 block_count;
-	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -3324,9 +3323,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
 			   sizeof(block_count)))
 		return -EFAULT;
 
-	ret = f2fs_resize_fs(sbi, block_count);
-
-	return ret;
+	return f2fs_resize_fs(sbi, block_count);
 }
 
 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
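For context, f2fs_ioc_resize_fs() is reached via the F2FS_IOC_RESIZE_FS ioctl issued against a file descriptor on the mounted filesystem, passing the new size in filesystem blocks. A minimal caller might look like the sketch below; the ioctl definition is reproduced only as an assumption for illustration (take the authoritative one from the kernel's f2fs headers), and the command-line arguments are made-up examples.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* Assumed for illustration; prefer the definition shipped with the kernel
 * headers over redefining it here. */
#ifndef F2FS_IOC_RESIZE_FS
#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_RESIZE_FS	_IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#endif

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <path-on-f2fs> <block_count>\n", argv[0]);
		return 1;
	}

	__u64 block_count = strtoull(argv[2], NULL, 0);
	int fd = open(argv[1], O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* CAP_SYS_ADMIN is required; the kernel rejects others with -EPERM. */
	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count) < 0)
		perror("F2FS_IOC_RESIZE_FS");

	close(fd);
	return 0;
}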
fs/f2fs/gc.c

@@ -13,6 +13,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
+#include <linux/sched/signal.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -1405,12 +1406,29 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
 		GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
 
-static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
-							unsigned int end)
+static int free_segment_range(struct f2fs_sb_info *sbi,
+					unsigned int secs, bool gc_only)
 {
-	int type;
-	unsigned int segno, next_inuse;
+	unsigned int segno, next_inuse, start, end;
+	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
+	int gc_mode, gc_type;
 	int err = 0;
+	int type;
+
+	/* Force block allocation for GC */
+	MAIN_SECS(sbi) -= secs;
+	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
+	end = MAIN_SEGS(sbi) - 1;
+
+	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
+	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
+		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
+			SIT_I(sbi)->last_victim[gc_mode] = 0;
+
+	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
+		if (sbi->next_victim_seg[gc_type] >= start)
+			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
+	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
 
 	/* Move out cursegs from the target range */
 	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
@@ -1423,18 +1441,24 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
 		};
 
-		down_write(&sbi->gc_lock);
 		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
-		up_write(&sbi->gc_lock);
 		put_gc_inode(&gc_list);
 
-		if (get_valid_blocks(sbi, segno, true))
-			return -EAGAIN;
+		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
+			err = -EAGAIN;
+			goto out;
+		}
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			goto out;
+		}
 	}
+	if (gc_only)
+		goto out;
 
-	err = f2fs_sync_fs(sbi->sb, 1);
+	err = f2fs_write_checkpoint(sbi, &cpc);
 	if (err)
-		return err;
+		goto out;
 
 	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
 	if (next_inuse <= end) {
@@ -1442,6 +1466,8 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
 			next_inuse);
 		f2fs_bug_on(sbi, 1);
 	}
+out:
+	MAIN_SECS(sbi) += secs;
 	return err;
 }
 
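The reworked free_segment_range() shrinks MAIN_SECS() up front so every allocation made on behalf of GC is forced below the range being removed, migrates whatever is still valid in that range with FG_GC (bailing out on fatal signals), and restores MAIN_SECS() at the new out: label on both the success and failure paths. The toy program below models that shrink-evacuate-restore idea with a flat array instead of segments; shrink_and_evacuate() and the other names are invented for the sketch and none of this is f2fs code.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define NR_SLOTS 16

static char data[NR_SLOTS];	/* 0 = free, non-zero = a valid "block" */
static int usable = NR_SLOTS;	/* plays the role of MAIN_SECS(sbi) */

/* Allocate only below the current usable limit, mimicking how shrinking
 * MAIN_SECS() forces GC allocations outside the range to be removed. */
static int alloc_slot(void)
{
	for (int i = 0; i < usable; i++)
		if (!data[i])
			return i;
	return -1;
}

/* Toy counterpart of free_segment_range(): shrink the usable region,
 * evacuate everything still living in the tail, then restore the counter
 * on the way out, mirroring the new "out:" label. */
static int shrink_and_evacuate(int slots, bool dry_run)
{
	int err = 0;

	usable -= slots;
	for (int i = usable; i < NR_SLOTS; i++) {
		int dst;

		if (!data[i])
			continue;
		dst = alloc_slot();
		if (dst < 0) {
			err = -1;	/* nowhere to move data: like -EAGAIN */
			goto out;
		}
		data[dst] = data[i];
		data[i] = 0;
	}
	if (dry_run)
		goto out;
	/* a real implementation would write a checkpoint at this point */
out:
	usable += slots;
	return err;
}

int main(void)
{
	memset(data, 0, sizeof(data));
	data[3] = 'a';
	data[14] = 'b';		/* lives in the range we want to drop */

	int err = shrink_and_evacuate(4, true);

	printf("evacuation %s; slot 14 is now %s\n",
	       err ? "failed" : "succeeded", data[14] ? "in use" : "empty");
	return err ? 1 : 0;
}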
@@ -1487,6 +1513,7 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
 
 	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
 	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
+	MAIN_SECS(sbi) += secs;
 	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
 	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
 	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
@@ -1508,8 +1535,8 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 {
 	__u64 old_block_count, shrunk_blocks;
+	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
 	unsigned int secs;
-	int gc_mode, gc_type;
 	int err = 0;
 	__u32 rem;
 
@@ -1544,10 +1571,27 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 		return -EINVAL;
 	}
 
-	freeze_bdev(sbi->sb->s_bdev);
-
 	shrunk_blocks = old_block_count - block_count;
 	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
+
+	/* stop other GC */
+	if (!down_write_trylock(&sbi->gc_lock))
+		return -EAGAIN;
+
+	/* stop CP to protect MAIN_SEC in free_segment_range */
+	f2fs_lock_op(sbi);
+	err = free_segment_range(sbi, secs, true);
+	f2fs_unlock_op(sbi);
+	up_write(&sbi->gc_lock);
+	if (err)
+		return err;
+
+	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+
+	freeze_super(sbi->sb);
+	down_write(&sbi->gc_lock);
+	mutex_lock(&sbi->cp_mutex);
+
 	spin_lock(&sbi->stat_lock);
 	if (shrunk_blocks + valid_user_blocks(sbi) +
 		sbi->current_reserved_blocks + sbi->unusable_block_count +
@@ -1556,69 +1600,44 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 	else
 		sbi->user_block_count -= shrunk_blocks;
 	spin_unlock(&sbi->stat_lock);
-	if (err) {
-		thaw_bdev(sbi->sb->s_bdev, sbi->sb);
-		return err;
-	}
-
-	mutex_lock(&sbi->resize_mutex);
-	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
-
-	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
-
-	MAIN_SECS(sbi) -= secs;
-
-	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
-		if (SIT_I(sbi)->last_victim[gc_mode] >=
-					MAIN_SECS(sbi) * sbi->segs_per_sec)
-			SIT_I(sbi)->last_victim[gc_mode] = 0;
-
-	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
-		if (sbi->next_victim_seg[gc_type] >=
-					MAIN_SECS(sbi) * sbi->segs_per_sec)
-			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
-
-	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
-
-	err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
-			MAIN_SEGS(sbi) - 1);
 	if (err)
-		goto out;
+		goto out_err;
+
+	err = free_segment_range(sbi, secs, false);
+	if (err)
+		goto recover_out;
 
 	update_sb_metadata(sbi, -secs);
 
 	err = f2fs_commit_super(sbi, false);
 	if (err) {
 		update_sb_metadata(sbi, secs);
-		goto out;
+		goto recover_out;
 	}
 
-	mutex_lock(&sbi->cp_mutex);
 	update_fs_metadata(sbi, -secs);
 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
-	mutex_unlock(&sbi->cp_mutex);
 
-	err = f2fs_sync_fs(sbi->sb, 1);
+	err = f2fs_write_checkpoint(sbi, &cpc);
 	if (err) {
-		mutex_lock(&sbi->cp_mutex);
 		update_fs_metadata(sbi, secs);
-		mutex_unlock(&sbi->cp_mutex);
 		update_sb_metadata(sbi, secs);
 		f2fs_commit_super(sbi, false);
 	}
-out:
+recover_out:
 	if (err) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
 
-		MAIN_SECS(sbi) += secs;
 		spin_lock(&sbi->stat_lock);
 		sbi->user_block_count += shrunk_blocks;
 		spin_unlock(&sbi->stat_lock);
 	}
+out_err:
+	mutex_unlock(&sbi->cp_mutex);
+	up_write(&sbi->gc_lock);
+	thaw_super(sbi->sb);
 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
-	mutex_unlock(&sbi->resize_mutex);
-	thaw_bdev(sbi->sb->s_bdev, sbi->sb);
 	return err;
 }
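Taken together, the rewritten f2fs_resize_fs() drops resize_mutex and the freeze_bdev()/thaw_bdev() pair in favour of freeze_super(), gc_lock and cp_mutex, and unwinds through two labels: recover_out rolls back the block-count reservation and flags the filesystem for fsck when a later step fails, while out_err only drops the locks and thaws the superblock. The fragment below is a generic, self-contained illustration of that two-label goto unwind style; all names are placeholders, not f2fs APIs.

#include <stdio.h>

static int reserve_blocks(void)    { puts("reserve blocks");   return 0; }
static void unreserve_blocks(void) { puts("unreserve blocks"); }
static int do_shrink(int fail)     { puts("shrink");           return fail ? -1 : 0; }

/* Two-label unwind: "recover_out" rolls back state that was already
 * modified, "out_err" only releases what was acquired up front. */
static int resize(int fail_shrink)
{
	int err;

	puts("freeze + take locks");		/* acquire phase */

	err = reserve_blocks();
	if (err)
		goto out_err;

	err = do_shrink(fail_shrink);
	if (err)
		goto recover_out;

	puts("commit new geometry");		/* further steps on success */

recover_out:
	if (err) {
		puts("mark NEED_FSCK");
		unreserve_blocks();		/* undo the reservation */
	}
out_err:
	puts("drop locks + thaw");		/* release phase, always runs */
	return err;
}

int main(void)
{
	resize(0);	/* success path */
	resize(1);	/* failure path exercises recover_out */
	return 0;
}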
fs/f2fs/super.c

@@ -3408,7 +3408,6 @@ try_onemore:
 	init_rwsem(&sbi->gc_lock);
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
-	mutex_init(&sbi->resize_mutex);
 	init_rwsem(&sbi->node_write);
 	init_rwsem(&sbi->node_change);
 
include/trace/events/f2fs.h

@@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY);
 TRACE_DEFINE_ENUM(CP_DISCARD);
 TRACE_DEFINE_ENUM(CP_TRIMMED);
 TRACE_DEFINE_ENUM(CP_PAUSE);
+TRACE_DEFINE_ENUM(CP_RESIZE);
 
 #define show_block_type(type)					\
 	__print_symbolic(type,					\
@@ -126,7 +127,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
 		{ CP_RECOVERY,	"Recovery" },			\
 		{ CP_DISCARD,	"Discard" },			\
 		{ CP_PAUSE,	"Pause" },			\
-		{ CP_TRIMMED,	"Trimmed" })
+		{ CP_TRIMMED,	"Trimmed" },			\
+		{ CP_RESIZE,	"Resize" })
 
 #define show_fsync_cpreason(type)				\
 	__print_symbolic(type,					\