erofs: add sysfs node to control sync decompression strategy
Although readpage is a synchronous path, there will be no additional
kworker scheduling overhead in non-atomic contexts together with
dm-verity. Let's add a sysfs node to disable sync decompression as an
option.

Link: https://lore.kernel.org/r/20211206143552.8384-1-huangjianan@oppo.com
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Huang Jianan <huangjianan@oppo.com>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Parent: 168e9a7620
Commit: 40452ffca3
Documentation/ABI/testing/sysfs-fs-erofs
@@ -5,3 +5,12 @@ Description:	Shows all enabled kernel features.
 		Supported features:
 		zero_padding, compr_cfgs, big_pcluster, chunked_file,
 		device_table, compr_head2, sb_chksum.
+
+What:		/sys/fs/erofs/<disk>/sync_decompress
+Date:		November 2021
+Contact:	"Huang Jianan" <huangjianan@oppo.com>
+Description:	Control strategy of sync decompression
+		- 0 (default, auto): enable for readpage, and enable for
+		  readahead on atomic contexts only,
+		- 1 (force on): enable for readpage and readahead.
+		- 2 (force off): disable for all situations.
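For reference, the new attribute behaves like any other writable sysfs node. The user-space sketch below is only an illustration and not part of this patch: the directory name that replaces <disk> under /sys/fs/erofs/ is taken from argv[1], and the optional argv[2] is one of the 0/1/2 policy values documented above.

/* Hypothetical helper (not part of this patch): read and optionally set
 * /sys/fs/erofs/<disk>/sync_decompress; <disk> is supplied as argv[1].
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	char buf[16];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <disk> [0|1|2]\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/fs/erofs/%s/sync_decompress", argv[1]);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current policy: %s", buf);
	fclose(f);

	if (argc > 2) {			/* e.g. "2" forces sync decompression off */
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return 1;
		}
		fputs(argv[2], f);
		fclose(f);
	}
	return 0;
}

Values greater than 2 are rejected with -EINVAL by the store handler added later in this diff.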
fs/erofs/internal.h
@@ -56,12 +56,18 @@ struct erofs_device_info {
 	u32 mapped_blkaddr;
 };
 
+enum {
+	EROFS_SYNC_DECOMPRESS_AUTO,
+	EROFS_SYNC_DECOMPRESS_FORCE_ON,
+	EROFS_SYNC_DECOMPRESS_FORCE_OFF
+};
+
 struct erofs_mount_opts {
 #ifdef CONFIG_EROFS_FS_ZIP
 	/* current strategy of how to use managed cache */
 	unsigned char cache_strategy;
-	/* strategy of sync decompression (false - auto, true - force on) */
-	bool readahead_sync_decompress;
+	/* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
+	unsigned int sync_decompress;
 
 	/* threshold for decompression synchronously */
 	unsigned int max_sync_decompress_pages;
fs/erofs/super.c
@@ -423,7 +423,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx)
 #ifdef CONFIG_EROFS_FS_ZIP
 	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
 	ctx->opt.max_sync_decompress_pages = 3;
-	ctx->opt.readahead_sync_decompress = false;
+	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
 #endif
 #ifdef CONFIG_EROFS_FS_XATTR
 	set_opt(&ctx->opt, XATTR_USER);
fs/erofs/sysfs.c
@@ -16,6 +16,7 @@ enum {
 
 enum {
 	struct_erofs_sb_info,
+	struct_erofs_mount_opts,
 };
 
 struct erofs_attr {
@@ -54,7 +55,14 @@ static struct erofs_attr erofs_attr_##_name = { \
 
 #define ATTR_LIST(name) (&erofs_attr_##name.attr)
 
+#ifdef CONFIG_EROFS_FS_ZIP
+EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts);
+#endif
+
 static struct attribute *erofs_attrs[] = {
+#ifdef CONFIG_EROFS_FS_ZIP
+	ATTR_LIST(sync_decompress),
+#endif
 	NULL,
 };
 ATTRIBUTE_GROUPS(erofs);
@@ -85,6 +93,8 @@ static unsigned char *__struct_ptr(struct erofs_sb_info *sbi,
 {
 	if (struct_type == struct_erofs_sb_info)
 		return (unsigned char *)sbi + offset;
+	if (struct_type == struct_erofs_mount_opts)
+		return (unsigned char *)&sbi->opt + offset;
 	return NULL;
 }
 
@@ -130,6 +140,11 @@ static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr,
 			return ret;
 		if (t != (unsigned int)t)
 			return -ERANGE;
+#ifdef CONFIG_EROFS_FS_ZIP
+		if (!strcmp(a->attr.name, "sync_decompress") &&
+		    (t > EROFS_SYNC_DECOMPRESS_FORCE_OFF))
+			return -EINVAL;
+#endif
 		*(unsigned int *)ptr = t;
 		return len;
 	case attr_pointer_bool:
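As a side note, the sysfs.c hunks above follow an offset-based lookup pattern: each attribute records which structure it lives in plus the field offset, __struct_ptr() resolves that pair to a byte pointer, and erofs_attr_store() writes the parsed value through it. The stand-alone sketch below uses made-up demo_* names to show the same pattern in isolation; it is not the kernel code itself.

/* Stand-alone illustration (made-up demo_* names, not kernel code):
 * an attribute stores which structure it lives in plus a field offset,
 * and a helper resolves that pair to a pointer the store path writes to.
 */
#include <stddef.h>
#include <stdio.h>

enum { struct_demo_sb_info, struct_demo_mount_opts };

struct demo_mount_opts { unsigned int sync_decompress; };
struct demo_sb_info    { struct demo_mount_opts opt; };

struct demo_attr {
	const char *name;
	int struct_type;
	size_t offset;
};

static unsigned char *demo_struct_ptr(struct demo_sb_info *sbi,
				      int struct_type, size_t offset)
{
	if (struct_type == struct_demo_sb_info)
		return (unsigned char *)sbi + offset;
	if (struct_type == struct_demo_mount_opts)
		return (unsigned char *)&sbi->opt + offset;
	return NULL;
}

int main(void)
{
	struct demo_sb_info sbi = { .opt = { .sync_decompress = 0 } };
	struct demo_attr attr = {
		"sync_decompress", struct_demo_mount_opts,
		offsetof(struct demo_mount_opts, sync_decompress)
	};
	unsigned int *field = (unsigned int *)
		demo_struct_ptr(&sbi, attr.struct_type, attr.offset);

	*field = 1;	/* what the _store() handler does after validation */
	printf("%s = %u\n", attr.name, sbi.opt.sync_decompress);
	return 0;
}

This is why exposing sync_decompress only needs the new struct_erofs_mount_opts case in __struct_ptr() plus the EROFS_ATTR_RW_UI() entry above.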
fs/erofs/zdata.c
@@ -762,6 +762,21 @@ err_out:
 	goto out;
 }
 
+static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
+					       unsigned int readahead_pages)
+{
+	/* auto: enable for readpage, disable for readahead */
+	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
+	    !readahead_pages)
+		return true;
+
+	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
+	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
+		return true;
+
+	return false;
+}
+
 static void z_erofs_decompressqueue_work(struct work_struct *work);
 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 				       bool sync, int bios)
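To make the decision table above easier to eyeball, here is a small user-space model of the same logic (illustrative names only; the threshold 3 mirrors the max_sync_decompress_pages default set in the super.c hunk). readahead_pages == 0 stands for the readpage path.

/* User-space model of the policy function above (illustrative only).
 * readahead_pages == 0 denotes the readpage path; the threshold 3
 * mirrors the max_sync_decompress_pages default from super.c.
 */
#include <stdbool.h>
#include <stdio.h>

enum { POLICY_AUTO, POLICY_FORCE_ON, POLICY_FORCE_OFF };

static bool sync_decompress_policy(int mode, unsigned int threshold,
				   unsigned int readahead_pages)
{
	if (mode == POLICY_AUTO && !readahead_pages)
		return true;	/* readpage only */
	if (mode == POLICY_FORCE_ON && readahead_pages <= threshold)
		return true;	/* readpage and small readahead windows */
	return false;		/* POLICY_FORCE_OFF, or readahead beyond threshold */
}

int main(void)
{
	printf("auto, readpage:         %d\n", sync_decompress_policy(POLICY_AUTO, 3, 0));
	printf("auto, readahead(2):     %d\n", sync_decompress_policy(POLICY_AUTO, 3, 2));
	printf("force-on, readahead(2): %d\n", sync_decompress_policy(POLICY_FORCE_ON, 3, 2));
	printf("force-off, readpage:    %d\n", sync_decompress_policy(POLICY_FORCE_OFF, 3, 0));
	return 0;
}

It prints 1, 0, 1, 0, matching the three modes documented in the sysfs ABI entry above.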
@@ -784,7 +799,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 		/* Use workqueue and sync decompression for atomic contexts only */
 		if (in_atomic() || irqs_disabled()) {
 			queue_work(z_erofs_workqueue, &io->u.work);
-			sbi->opt.readahead_sync_decompress = true;
+			/* enable sync decompression for readahead */
+			if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+				sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
 			return;
 		}
 		z_erofs_decompressqueue_work(&io->u.work);
@@ -1435,6 +1452,7 @@ skip:
 static int z_erofs_readpage(struct file *file, struct page *page)
 {
 	struct inode *const inode = page->mapping->host;
+	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 	struct page *pagepool = NULL;
 	int err;
@@ -1450,7 +1468,8 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 	(void)z_erofs_collector_end(&f.clt);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
+			 z_erofs_get_sync_decompress_policy(sbi, 0));
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1501,8 +1520,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	(void)z_erofs_collector_end(&f.clt);
 
 	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
-			 sbi->opt.readahead_sync_decompress &&
-			 nr_pages <= sbi->opt.max_sync_decompress_pages);
+			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
 	if (f.map.mpage)
 		put_page(f.map.mpage);
 	erofs_release_pages(&pagepool);