erofs: clean up decompress queue stuff

Previously, both z_erofs_unzip_io and z_erofs_unzip_io_sb
recorded decompress queues for the backend to use.

The only difference is that z_erofs_unzip_io is used for
on-stack sync decompression and therefore has no super_block
field (the caller can pass the super_block in from its own
context), but that saves only a pointer at the cost of extra
complexity.

Rename z_erofs_unzip_io to z_erofs_decompressqueue with
a fixed super_block member, kill z_erofs_unzip_io_sb
entirely, and fall back to sync decompression on memory
allocation failure.
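
To make the fallback concrete, here is a minimal userspace sketch of
the protocol jobqueue_init() now follows (illustrative stand-ins only:
queue_init/on_stack are invented names and malloc() stands in for
kvzalloc(); this is not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue { bool on_stack; };

/* a NULL fg means "always use the caller-provided on-stack queue" */
static struct queue *queue_init(struct queue *fgq, bool *fg)
{
	struct queue *q;

	if (fg && !*fg) {
		q = malloc(sizeof(*q));	/* stands in for kvzalloc() */
		if (!q) {
			*fg = true;	/* tell the caller: forced to sync mode */
			goto fg_out;
		}
		q->on_stack = false;	/* background (workqueue) queue */
		return q;
	}
fg_out:
	q = fgq;			/* fall back to the on-stack queue */
	q->on_stack = true;
	return q;
}

int main(void)
{
	struct queue fgq;
	bool fg = false;
	struct queue *q = queue_init(&fgq, &fg);

	printf("background=%d forced_sync=%d\n", !q->on_stack, fg);
	if (q != &fgq)
		free(q);
	return 0;
}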

Link: https://lore.kernel.org/r/20191008125616.183715-4-gaoxiang25@huawei.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Author: Gao Xiang <gaoxiang25@huawei.com>
Date: 2019-10-08 20:56:15 +08:00
Parent: 5ddcee1f3a
Commit: a4b1fab121
2 changed files with 60 additions and 81 deletions

--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -693,13 +693,11 @@ err_out:
 	goto out;
 }
 
-static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+				       bool sync, int bios)
 {
-	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
-	struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
-	bool background = tagptr_unfold_tags(t);
-
-	if (!background) {
+	/* wake up the caller thread for sync decompression */
+	if (sync) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&io->u.wait.lock, flags);
@@ -713,37 +711,30 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
 		queue_work(z_erofs_workqueue, &io->u.work);
 }
 
-static inline void z_erofs_vle_read_endio(struct bio *bio)
+static void z_erofs_vle_read_endio(struct bio *bio)
 {
-	struct erofs_sb_info *sbi = NULL;
+	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
 	blk_status_t err = bio->bi_status;
 	struct bio_vec *bvec;
 	struct bvec_iter_all iter_all;
 
 	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
-		bool cachemngd = false;
 
 		DBG_BUGON(PageUptodate(page));
 		DBG_BUGON(!page->mapping);
 
-		if (!sbi && !z_erofs_page_is_staging(page))
-			sbi = EROFS_SB(page->mapping->host->i_sb);
-
-		/* sbi should already be gotten if the page is managed */
-		if (sbi)
-			cachemngd = erofs_page_is_managed(sbi, page);
-
 		if (err)
 			SetPageError(page);
-		else if (cachemngd)
-			SetPageUptodate(page);
 
-		if (cachemngd)
+		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+			if (!err)
+				SetPageUptodate(page);
 			unlock_page(page);
+		}
 	}
-
-	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
+	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
 	bio_put(bio);
 }
@@ -948,8 +939,7 @@ out:
 	return err;
 }
 
-static void z_erofs_vle_unzip_all(struct super_block *sb,
-				  struct z_erofs_unzip_io *io,
+static void z_erofs_vle_unzip_all(const struct z_erofs_decompressqueue *io,
 				  struct list_head *pagepool)
 {
 	z_erofs_next_pcluster_t owned = io->head;
@@ -966,21 +956,21 @@ static void z_erofs_vle_unzip_all(struct super_block *sb,
 		pcl = container_of(owned, struct z_erofs_pcluster, next);
 		owned = READ_ONCE(pcl->next);
 
-		z_erofs_decompress_pcluster(sb, pcl, pagepool);
+		z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
 	}
 }
 
 static void z_erofs_vle_unzip_wq(struct work_struct *work)
 {
-	struct z_erofs_unzip_io_sb *iosb =
-		container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
+	struct z_erofs_decompressqueue *bgq =
+		container_of(work, struct z_erofs_decompressqueue, u.work);
 	LIST_HEAD(pagepool);
 
-	DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
+	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+	z_erofs_vle_unzip_all(bgq, &pagepool);
 
 	put_pages_list(&pagepool);
-	kvfree(iosb);
+	kvfree(bgq);
 }
 
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
@@ -1095,31 +1085,28 @@ out: /* the only exit (for tracing and debugging) */
 	return page;
 }
 
-static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
-					      struct z_erofs_unzip_io *io,
-					      bool foreground)
+static struct z_erofs_decompressqueue *
+jobqueue_init(struct super_block *sb,
+	      struct z_erofs_decompressqueue *fgq, bool *fg)
 {
-	struct z_erofs_unzip_io_sb *iosb;
+	struct z_erofs_decompressqueue *q;
 
-	if (foreground) {
-		/* waitqueue available for foreground io */
-		DBG_BUGON(!io);
-
-		init_waitqueue_head(&io->u.wait);
-		atomic_set(&io->pending_bios, 0);
-		goto out;
+	if (fg && !*fg) {
+		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
+		if (!q) {
+			*fg = true;
+			goto fg_out;
+		}
+		INIT_WORK(&q->u.work, z_erofs_vle_unzip_wq);
+	} else {
+fg_out:
+		q = fgq;
+		init_waitqueue_head(&fgq->u.wait);
+		atomic_set(&fgq->pending_bios, 0);
 	}
-
-	iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
-	DBG_BUGON(!iosb);
-
-	/* initialize fields in the allocated descriptor */
-	io = &iosb->io;
-	iosb->sb = sb;
-	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
-out:
-	io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
-	return io;
+	q->sb = sb;
+	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+	return q;
 }
 
 /* define decompression jobqueue types */
@@ -1130,22 +1117,17 @@ enum {
 };
 
 static void *jobqueueset_init(struct super_block *sb,
-			      z_erofs_next_pcluster_t qtail[],
-			      struct z_erofs_unzip_io *q[],
-			      struct z_erofs_unzip_io *fgq,
-			      bool forcefg)
+			      struct z_erofs_decompressqueue *q[],
+			      struct z_erofs_decompressqueue *fgq, bool *fg)
 {
 	/*
 	 * if managed cache is enabled, bypass jobqueue is needed,
 	 * no need to read from device for all pclusters in this queue.
 	 */
-	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
-	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
-
-	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
-	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
+	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
 
-	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
+	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
 }
 
 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
@@ -1167,9 +1149,8 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
-				       unsigned int nr_bios,
-				       bool force_fg)
+static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
+				       unsigned int nr_bios, bool force_fg)
 {
 	/*
 	 * although background is preferred, no one is pending for submission.
@@ -1178,19 +1159,19 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
 	if (force_fg || nr_bios)
 		return false;
 
-	kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
+	kvfree(q[JQ_SUBMIT]);
 	return true;
 }
 
 static bool z_erofs_vle_submit_all(struct super_block *sb,
 				   z_erofs_next_pcluster_t owned_head,
 				   struct list_head *pagepool,
-				   struct z_erofs_unzip_io *fgq,
-				   bool force_fg)
+				   struct z_erofs_decompressqueue *fgq,
+				   bool *force_fg)
 {
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
-	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
+	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
 	struct bio *bio;
 	void *bi_private;
 	/* since bio will be NULL, no need to initialize last_index */
@@ -1204,7 +1185,9 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 	force_submit = false;
 	bio = NULL;
 	nr_bios = 0;
-	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
+	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
+	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
+	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
 
 	/* by default, all need io submission */
 	q[JQ_SUBMIT]->head = owned_head;
@@ -1280,10 +1263,10 @@ skippage:
 	if (bio)
 		submit_bio(bio);
 
-	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
+	if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
 		return true;
 
-	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
+	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
 	return true;
 }
@@ -1292,14 +1275,14 @@ static void z_erofs_submit_and_unzip(struct super_block *sb,
 				     struct list_head *pagepool,
 				     bool force_fg)
 {
-	struct z_erofs_unzip_io io[NR_JOBQUEUES];
+	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
 	if (!z_erofs_vle_submit_all(sb, clt->owned_head,
-				    pagepool, io, force_fg))
+				    pagepool, io, &force_fg))
 		return;
 
 	/* decompress no I/O pclusters immediately */
-	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
+	z_erofs_vle_unzip_all(&io[JQ_BYPASS], pagepool);
 
 	if (!force_fg)
 		return;
@@ -1309,7 +1292,7 @@ static void z_erofs_submit_and_unzip(struct super_block *sb,
 			!atomic_read(&io[JQ_SUBMIT].pending_bios));
 
 	/* let's synchronous decompression */
-	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
+	z_erofs_vle_unzip_all(&io[JQ_SUBMIT], pagepool);
 }
 
 static int z_erofs_vle_normalaccess_readpage(struct file *file,
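
The zdata.c hunks above also lean on the bi_private encoding: the sync
flag is folded into the spare low bit of the queue pointer with the
erofs tagptr helpers (tagptr_fold/tagptr_unfold_ptr/tagptr_unfold_tags).
A self-contained sketch of that bit-packing idea in plain uintptr_t
arithmetic (an illustration, not the actual tagptr.h API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct decompressqueue { int dummy; };

/* pack an aligned pointer plus a 1-bit tag into a single void * */
static void *fold(struct decompressqueue *q, bool sync)
{
	/* the struct is at least 2-byte aligned, so bit 0 is free */
	return (void *)((uintptr_t)q | (uintptr_t)sync);
}

static struct decompressqueue *unfold_ptr(void *p)
{
	return (struct decompressqueue *)((uintptr_t)p & ~(uintptr_t)1);
}

static bool unfold_tag(void *p)
{
	return (uintptr_t)p & 1;
}

int main(void)
{
	struct decompressqueue q;
	void *bi_private = fold(&q, true);	/* what the submit path stores */

	assert(unfold_ptr(bi_private) == &q);	/* what the endio recovers */
	assert(unfold_tag(bi_private));
	return 0;
}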

--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -84,7 +84,8 @@ struct z_erofs_pcluster {
 
 #define Z_EROFS_WORKGROUP_SIZE	sizeof(struct z_erofs_pcluster)
 
-struct z_erofs_unzip_io {
+struct z_erofs_decompressqueue {
+	struct super_block *sb;
 	atomic_t pending_bios;
 	z_erofs_next_pcluster_t head;
 
@@ -94,11 +95,6 @@ struct z_erofs_unzip_io {
 	} u;
 };
 
-struct z_erofs_unzip_io_sb {
-	struct z_erofs_unzip_io io;
-	struct super_block *sb;
-};
-
 #define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
 static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
 					 struct page *page)
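
For reference, a hedged userspace sketch of the pending_bios accounting
that z_erofs_decompress_kickoff() performs: submission passes the number
of issued bios, each bio completion passes -1, and whichever call brings
the counter to zero triggers the wakeup (sync) or the background work.
The puts() calls stand in for wake_up_locked()/queue_work(); this is a
simplified model, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct decompressqueue {
	atomic_int pending_bios;
	bool sync;	/* stands in for the tagptr bit */
};

static void decompress_kickoff(struct decompressqueue *q, int bios)
{
	/* atomic_fetch_add() returns the old value; add bios for the new one */
	if (atomic_fetch_add(&q->pending_bios, bios) + bios)
		return;		/* bios still in flight */

	if (q->sync)
		puts("wake up the waiting reader");	/* wake_up_locked() */
	else
		puts("queue the decompression work");	/* queue_work() */
}

int main(void)
{
	struct decompressqueue q = { .sync = true };

	decompress_kickoff(&q, 2);	/* two bios submitted */
	decompress_kickoff(&q, -1);	/* first completion */
	decompress_kickoff(&q, -1);	/* last one: counter hits zero */
	return 0;
}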