filemap: Convert filemap_get_read_batch() to use a folio_batch

This change ripples all the way through the filemap_read() call chain and
removes a lot of messing about converting folios to pages and back again.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
This commit is contained in:
Matthew Wilcox (Oracle) 2021-12-06 15:25:33 -05:00
Parent d996fc7f61
Commit 25d6a23e8d
1 changed file with 33 additions and 32 deletions

View file

@ -2325,16 +2325,16 @@ static void shrink_readahead_size_eio(struct file_ra_state *ra)
} }
/* /*
* filemap_get_read_batch - Get a batch of pages for read * filemap_get_read_batch - Get a batch of folios for read
* *
* Get a batch of pages which represent a contiguous range of bytes * Get a batch of folios which represent a contiguous range of bytes in
* in the file. No tail pages will be returned. If @index is in the * the file. No exceptional entries will be returned. If @index is in
* middle of a THP, the entire THP will be returned. The last page in * the middle of a folio, the entire folio will be returned. The last
* the batch may have Readahead set or be not Uptodate so that the * folio in the batch may have the readahead flag set or the uptodate flag
* caller can take the appropriate action. * clear so that the caller can take the appropriate action.
*/ */
static void filemap_get_read_batch(struct address_space *mapping, static void filemap_get_read_batch(struct address_space *mapping,
pgoff_t index, pgoff_t max, struct pagevec *pvec) pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
{ {
XA_STATE(xas, &mapping->i_pages, index); XA_STATE(xas, &mapping->i_pages, index);
struct folio *folio; struct folio *folio;
@ -2349,9 +2349,9 @@ static void filemap_get_read_batch(struct address_space *mapping,
goto retry; goto retry;
if (unlikely(folio != xas_reload(&xas))) if (unlikely(folio != xas_reload(&xas)))
goto put_page; goto put_folio;
if (!pagevec_add(pvec, &folio->page)) if (!folio_batch_add(fbatch, folio))
break; break;
if (!folio_test_uptodate(folio)) if (!folio_test_uptodate(folio))
break; break;
@ -2360,7 +2360,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
xas.xa_index = folio->index + folio_nr_pages(folio) - 1; xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK; xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
continue; continue;
put_page: put_folio:
folio_put(folio); folio_put(folio);
retry: retry:
xas_reset(&xas); xas_reset(&xas);
@ -2475,7 +2475,7 @@ unlock_mapping:
static int filemap_create_folio(struct file *file, static int filemap_create_folio(struct file *file,
struct address_space *mapping, pgoff_t index, struct address_space *mapping, pgoff_t index,
struct pagevec *pvec) struct folio_batch *fbatch)
{ {
struct folio *folio; struct folio *folio;
int error; int error;
@ -2510,7 +2510,7 @@ static int filemap_create_folio(struct file *file,
goto error; goto error;
filemap_invalidate_unlock_shared(mapping); filemap_invalidate_unlock_shared(mapping);
pagevec_add(pvec, &folio->page); folio_batch_add(fbatch, folio);
return 0; return 0;
error: error:
filemap_invalidate_unlock_shared(mapping); filemap_invalidate_unlock_shared(mapping);
@ -2531,7 +2531,7 @@ static int filemap_readahead(struct kiocb *iocb, struct file *file,
} }
static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter, static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
struct pagevec *pvec) struct folio_batch *fbatch)
{ {
struct file *filp = iocb->ki_filp; struct file *filp = iocb->ki_filp;
struct address_space *mapping = filp->f_mapping; struct address_space *mapping = filp->f_mapping;
@ -2546,32 +2546,33 @@ retry:
if (fatal_signal_pending(current)) if (fatal_signal_pending(current))
return -EINTR; return -EINTR;
filemap_get_read_batch(mapping, index, last_index, pvec); filemap_get_read_batch(mapping, index, last_index, fbatch);
if (!pagevec_count(pvec)) { if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & IOCB_NOIO) if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN; return -EAGAIN;
page_cache_sync_readahead(mapping, ra, filp, index, page_cache_sync_readahead(mapping, ra, filp, index,
last_index - index); last_index - index);
filemap_get_read_batch(mapping, index, last_index, pvec); filemap_get_read_batch(mapping, index, last_index, fbatch);
} }
if (!pagevec_count(pvec)) { if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
return -EAGAIN; return -EAGAIN;
err = filemap_create_folio(filp, mapping, err = filemap_create_folio(filp, mapping,
iocb->ki_pos >> PAGE_SHIFT, pvec); iocb->ki_pos >> PAGE_SHIFT, fbatch);
if (err == AOP_TRUNCATED_PAGE) if (err == AOP_TRUNCATED_PAGE)
goto retry; goto retry;
return err; return err;
} }
folio = page_folio(pvec->pages[pagevec_count(pvec) - 1]); folio = fbatch->folios[folio_batch_count(fbatch) - 1];
if (folio_test_readahead(folio)) { if (folio_test_readahead(folio)) {
err = filemap_readahead(iocb, filp, mapping, folio, last_index); err = filemap_readahead(iocb, filp, mapping, folio, last_index);
if (err) if (err)
goto err; goto err;
} }
if (!folio_test_uptodate(folio)) { if (!folio_test_uptodate(folio)) {
if ((iocb->ki_flags & IOCB_WAITQ) && pagevec_count(pvec) > 1) if ((iocb->ki_flags & IOCB_WAITQ) &&
folio_batch_count(fbatch) > 1)
iocb->ki_flags |= IOCB_NOWAIT; iocb->ki_flags |= IOCB_NOWAIT;
err = filemap_update_page(iocb, mapping, iter, folio); err = filemap_update_page(iocb, mapping, iter, folio);
if (err) if (err)
@ -2582,7 +2583,7 @@ retry:
err: err:
if (err < 0) if (err < 0)
folio_put(folio); folio_put(folio);
if (likely(--pvec->nr)) if (likely(--fbatch->nr))
return 0; return 0;
if (err == AOP_TRUNCATED_PAGE) if (err == AOP_TRUNCATED_PAGE)
goto retry; goto retry;
@ -2609,7 +2610,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
struct file_ra_state *ra = &filp->f_ra; struct file_ra_state *ra = &filp->f_ra;
struct address_space *mapping = filp->f_mapping; struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct pagevec pvec; struct folio_batch fbatch;
int i, error = 0; int i, error = 0;
bool writably_mapped; bool writably_mapped;
loff_t isize, end_offset; loff_t isize, end_offset;
@ -2620,7 +2621,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
return 0; return 0;
iov_iter_truncate(iter, inode->i_sb->s_maxbytes); iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
pagevec_init(&pvec); folio_batch_init(&fbatch);
do { do {
cond_resched(); cond_resched();
@ -2636,7 +2637,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
if (unlikely(iocb->ki_pos >= i_size_read(inode))) if (unlikely(iocb->ki_pos >= i_size_read(inode)))
break; break;
error = filemap_get_pages(iocb, iter, &pvec); error = filemap_get_pages(iocb, iter, &fbatch);
if (error < 0) if (error < 0)
break; break;
@ -2650,7 +2651,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
*/ */
isize = i_size_read(inode); isize = i_size_read(inode);
if (unlikely(iocb->ki_pos >= isize)) if (unlikely(iocb->ki_pos >= isize))
goto put_pages; goto put_folios;
end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
/* /*
@ -2665,10 +2666,10 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
*/ */
if (iocb->ki_pos >> PAGE_SHIFT != if (iocb->ki_pos >> PAGE_SHIFT !=
ra->prev_pos >> PAGE_SHIFT) ra->prev_pos >> PAGE_SHIFT)
mark_page_accessed(pvec.pages[0]); folio_mark_accessed(fbatch.folios[0]);
for (i = 0; i < pagevec_count(&pvec); i++) { for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = page_folio(pvec.pages[i]); struct folio *folio = fbatch.folios[i];
size_t fsize = folio_size(folio); size_t fsize = folio_size(folio);
size_t offset = iocb->ki_pos & (fsize - 1); size_t offset = iocb->ki_pos & (fsize - 1);
size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
@ -2698,10 +2699,10 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
break; break;
} }
} }
put_pages: put_folios:
for (i = 0; i < pagevec_count(&pvec); i++) for (i = 0; i < folio_batch_count(&fbatch); i++)
put_page(pvec.pages[i]); folio_put(fbatch.folios[i]);
pagevec_reinit(&pvec); folio_batch_init(&fbatch);
} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
file_accessed(filp); file_accessed(filp);