f2fs: keep migration IO order in LFS mode

For non-migration IO, data/node blocks are submitted in allocation order by sorting IOs in the per-log io_list; migration IO, however, can be submitted out of order. In LFS mode, all IOs including migration IO must stay ordered, so this patch adds an additional lock to preserve the submission order.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Yunlong Song <yunlong.song@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Parent: e5e5732d81
Commit: 107a805de8
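To illustrate the locking scheme (not part of the commit itself): regular data writes in do_write_page() take io_order_lock shared, so they can still run concurrently, while GC block migration in move_data_block() takes it exclusive, so a migration cannot interleave with in-flight allocations and break the LFS submission order. Below is a minimal userspace analogue using a POSIX rwlock in place of the kernel rw_semaphore; the function names and printf bodies are illustrative only, not taken from f2fs.

/*
 * Minimal userspace sketch of the io_order_lock idea. A POSIX rwlock
 * stands in for the kernel rw_semaphore; names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t io_order_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Analogue of do_write_page(): regular writes share the lock. */
static void normal_write(int blk)
{
	pthread_rwlock_rdlock(&io_order_lock);	/* like down_read() */
	printf("allocate + submit block %d in allocation order\n", blk);
	pthread_rwlock_unlock(&io_order_lock);	/* like up_read() */
}

/* Analogue of move_data_block(): migration excludes all other writers. */
static void migrate_block(int blk)
{
	pthread_rwlock_wrlock(&io_order_lock);	/* like down_write() */
	printf("migrate block %d without interleaving other IO\n", blk);
	pthread_rwlock_unlock(&io_order_lock);	/* like up_write() */
}

int main(void)
{
	normal_write(1);
	migrate_block(2);
	normal_write(3);
	return 0;
}

Using a reader/writer lock rather than a plain mutex keeps the common write path concurrent; only cold-data migration pays the exclusive-lock cost, and only when LFS mode is enabled.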
fs/f2fs/f2fs.h
@@ -1114,6 +1114,8 @@ struct f2fs_sb_info {
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
 	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
 						/* bio ordering for NODE/DATA */
+	/* keep migration IO order for LFS mode */
+	struct rw_semaphore io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */
 
 	/* for checkpoint */
fs/f2fs/gc.c
@@ -614,6 +614,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
 	struct page *page;
 	block_t newaddr;
 	int err;
+	bool lfs_mode = test_opt(fio.sbi, LFS);
 
 	/* do not read out */
 	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
@@ -654,6 +655,9 @@ static void move_data_block(struct inode *inode, block_t bidx,
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
+	if (lfs_mode)
+		down_write(&fio.sbi->io_order_lock);
+
 	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
 					&sum, CURSEG_COLD_DATA, NULL, false);
 
@@ -710,6 +714,8 @@ static void move_data_block(struct inode *inode, block_t bidx,
 put_page_out:
 	f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
+	if (lfs_mode)
+		up_write(&fio.sbi->io_order_lock);
 	if (err)
 		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
 								true, true);
fs/f2fs/segment.c
@@ -2750,7 +2750,10 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
 	int type = __get_segment_type(fio);
 	int err;
+	bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
 
+	if (keep_order)
+		down_read(&fio->sbi->io_order_lock);
 reallocate:
 	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio, true);
@@ -2763,6 +2766,8 @@ reallocate:
 	} else if (!err) {
 		update_device_state(fio);
 	}
+	if (keep_order)
+		up_read(&fio->sbi->io_order_lock);
 }
 
 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
fs/f2fs/super.c
@@ -2369,6 +2369,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
 		for (j = HOT; j < NR_TEMP_TYPE; j++)
 			mutex_init(&sbi->wio_mutex[i][j]);
+	init_rwsem(&sbi->io_order_lock);
 	spin_lock_init(&sbi->cp_lock);
 
 	sbi->dirty_device = 0;