mpage: Convert do_mpage_readpage() to use a folio
Pass in a folio from mpage_readahead().  Also convert
map_buffer_to_page() to map_buffer_to_folio().  There's still no
support for large folios here; there are numerous places which depend
on the folio being PAGE_SIZE.  The VM_BUG_ON prevents anyone from
thinking that it will work.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
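For context, filesystems reach do_mpage_readpage() through mpage_read_folio() and mpage_readahead(), which they wire into their address_space_operations. A minimal sketch of that wiring follows; the "examplefs" names and the stub get_block callback are hypothetical illustrations, not part of this patch:

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Hypothetical get_block_t callback: a real filesystem maps 'iblock'
 * within 'inode' to an on-disk block and fills in bh_result. */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create)
{
        return 0;
}

/* ->read_folio and ->readahead delegate to the mpage helpers,
 * passing the filesystem's block-mapping callback. */
static int examplefs_read_folio(struct file *file, struct folio *folio)
{
        return mpage_read_folio(folio, examplefs_get_block);
}

static void examplefs_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
        .read_folio     = examplefs_read_folio,
        .readahead      = examplefs_readahead,
};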
Parent: 6ffcd825e7
Commit: 211d04445b

 fs/mpage.c | 78
@@ -75,26 +75,28 @@ static struct bio *mpage_bio_submit(struct bio *bio)
  * them.  So when the buffer is up to date and the page size == block size,
  * this marks the page up to date instead of adding new buffers.
  */
-static void
-map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
+static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
+                int page_block)
 {
-        struct inode *inode = page->mapping->host;
+        struct inode *inode = folio->mapping->host;
         struct buffer_head *page_bh, *head;
         int block = 0;
 
-        if (!page_has_buffers(page)) {
+        head = folio_buffers(folio);
+        if (!head) {
                 /*
                  * don't make any buffers if there is only one buffer on
-                 * the page and the page just needs to be set up to date
+                 * the folio and the folio just needs to be set up to date
                  */
                 if (inode->i_blkbits == PAGE_SHIFT &&
                     buffer_uptodate(bh)) {
-                        SetPageUptodate(page);
+                        folio_mark_uptodate(folio);
                         return;
                 }
-                create_empty_buffers(page, i_blocksize(inode), 0);
+                create_empty_buffers(&folio->page, i_blocksize(inode), 0);
+                head = folio_buffers(folio);
         }
-        head = page_buffers(page);
+
         page_bh = head;
         do {
                 if (block == page_block) {
@@ -110,7 +112,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 
 struct mpage_readpage_args {
         struct bio *bio;
-        struct page *page;
+        struct folio *folio;
         unsigned int nr_pages;
         bool is_readahead;
         sector_t last_block_in_bio;
@@ -130,8 +132,8 @@ struct mpage_readpage_args {
  */
 static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 {
-        struct page *page = args->page;
-        struct inode *inode = page->mapping->host;
+        struct folio *folio = args->folio;
+        struct inode *inode = folio->mapping->host;
         const unsigned blkbits = inode->i_blkbits;
         const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
         const unsigned blocksize = 1 << blkbits;
@@ -148,17 +150,20 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
         int op = REQ_OP_READ;
         unsigned nblocks;
         unsigned relative_block;
-        gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+        gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+
+        /* MAX_BUF_PER_PAGE, for example */
+        VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
 
         if (args->is_readahead) {
                 op |= REQ_RAHEAD;
                 gfp |= __GFP_NORETRY | __GFP_NOWARN;
         }
 
-        if (page_has_buffers(page))
+        if (folio_buffers(folio))
                 goto confused;
 
-        block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+        block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
         last_block = block_in_file + args->nr_pages * blocks_per_page;
         last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
         if (last_block > last_block_in_file)
@@ -191,9 +196,9 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
         }
 
         /*
-         * Then do more get_blocks calls until we are done with this page.
+         * Then do more get_blocks calls until we are done with this folio.
          */
-        map_bh->b_page = page;
+        map_bh->b_page = &folio->page;
         while (page_block < blocks_per_page) {
                 map_bh->b_state = 0;
                 map_bh->b_size = 0;
@@ -216,12 +221,12 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 
                 /* some filesystems will copy data into the page during
                  * the get_block call, in which case we don't want to
-                 * read it again.  map_buffer_to_page copies the data
-                 * we just collected from get_block into the page's buffers
-                 * so readpage doesn't have to repeat the get_block call
+                 * read it again.  map_buffer_to_folio copies the data
+                 * we just collected from get_block into the folio's buffers
+                 * so read_folio doesn't have to repeat the get_block call
                  */
                 if (buffer_uptodate(map_bh)) {
-                        map_buffer_to_page(page, map_bh, page_block);
+                        map_buffer_to_folio(folio, map_bh, page_block);
                         goto confused;
                 }
 
@@ -246,18 +251,18 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
         }
 
         if (first_hole != blocks_per_page) {
-                zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
+                folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
                 if (first_hole == 0) {
-                        SetPageUptodate(page);
-                        unlock_page(page);
+                        folio_mark_uptodate(folio);
+                        folio_unlock(folio);
                         goto out;
                 }
         } else if (fully_mapped) {
-                SetPageMappedToDisk(page);
+                folio_set_mappedtodisk(folio);
         }
 
         /*
-         * This page will go to BIO.  Do we need to send this BIO off first?
+         * This folio will go to BIO.  Do we need to send this BIO off first?
          */
         if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
                 args->bio = mpage_bio_submit(args->bio);
@@ -266,7 +271,7 @@ alloc_new:
         if (args->bio == NULL) {
                 if (first_hole == blocks_per_page) {
                         if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
-                                                                page))
+                                                                &folio->page))
                                 goto out;
                 }
                 args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), op,
@@ -277,7 +282,7 @@ alloc_new:
         }
 
         length = first_hole << blkbits;
-        if (bio_add_page(args->bio, page, length, 0) < length) {
+        if (!bio_add_folio(args->bio, folio, length, 0)) {
                 args->bio = mpage_bio_submit(args->bio);
                 goto alloc_new;
         }
@@ -295,10 +300,10 @@ out:
 confused:
         if (args->bio)
                 args->bio = mpage_bio_submit(args->bio);
-        if (!PageUptodate(page))
-                block_read_full_folio(page_folio(page), args->get_block);
+        if (!folio_test_uptodate(folio))
+                block_read_full_folio(folio, args->get_block);
         else
-                unlock_page(page);
+                folio_unlock(folio);
         goto out;
 }
 
@@ -343,18 +348,17 @@ confused:
  */
 void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
 {
-        struct page *page;
+        struct folio *folio;
         struct mpage_readpage_args args = {
                 .get_block = get_block,
                 .is_readahead = true,
         };
 
-        while ((page = readahead_page(rac))) {
-                prefetchw(&page->flags);
-                args.page = page;
+        while ((folio = readahead_folio(rac))) {
+                prefetchw(&folio->flags);
+                args.folio = folio;
                 args.nr_pages = readahead_count(rac);
                 args.bio = do_mpage_readpage(&args);
-                put_page(page);
         }
         if (args.bio)
                 mpage_bio_submit(args.bio);
@@ -367,13 +371,11 @@ EXPORT_SYMBOL(mpage_readahead);
 int mpage_read_folio(struct folio *folio, get_block_t get_block)
 {
         struct mpage_readpage_args args = {
-                .page = &folio->page,
+                .folio = folio,
                 .nr_pages = 1,
                 .get_block = get_block,
         };
 
-        VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
         args.bio = do_mpage_readpage(&args);
         if (args.bio)
                 mpage_bio_submit(args.bio);