nfs: Convert from invalidatepage to invalidate_folio

Print the folio index instead of the pointer, since this is more
useful.  We also don't need to use page_file_mapping() as we do not
invalidate swapcache pages.  Since this is the only caller of
nfs_wb_page_cancel(), convert it to nfs_wb_folio_cancel().
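
For context (not part of this patch), the address_space_operations hook
being converted changes roughly as follows; this is a sketch of how the
two prototypes look in include/linux/fs.h at this point in the folio
conversion:

    /* Old hook: page-based, with unsigned int offset/length. */
    void (*invalidatepage)(struct page *, unsigned int offset,
                           unsigned int length);

    /* New hook: folio-based, with size_t offset/length so a range inside
     * a folio larger than one page can be described. */
    void (*invalidate_folio)(struct folio *, size_t offset, size_t len);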

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
Author: Matthew Wilcox (Oracle)
Date:   2022-02-09 20:21:47 +00:00
Parent: c5b56b50d7
Commit: 6d740c76ea
3 changed files with 13 additions and 13 deletions

--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c

@@ -406,17 +406,17 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
  * - Called if either PG_private or PG_fscache is set on the page
  * - Caller holds page lock
  */
-static void nfs_invalidate_page(struct page *page, unsigned int offset,
-                                unsigned int length)
+static void nfs_invalidate_folio(struct folio *folio, size_t offset,
+                                 size_t length)
 {
-        dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
-                 page, offset, length);
+        dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
+                 folio->index, offset, length);
 
-        if (offset != 0 || length < PAGE_SIZE)
+        if (offset != 0 || length < folio_size(folio))
                 return;
         /* Cancel any unstarted writes on this page */
-        nfs_wb_page_cancel(page_file_mapping(page)->host, page);
-        wait_on_page_fscache(page);
+        nfs_wb_folio_cancel(folio->mapping->host, folio);
+        folio_wait_fscache(folio);
 }
 
 /*
@@ -520,7 +520,7 @@ const struct address_space_operations nfs_file_aops = {
         .writepages = nfs_writepages,
         .write_begin = nfs_write_begin,
         .write_end = nfs_write_end,
-        .invalidatepage = nfs_invalidate_page,
+        .invalidate_folio = nfs_invalidate_folio,
         .releasepage = nfs_release_page,
         .direct_IO = nfs_direct_IO,
 #ifdef CONFIG_MIGRATION

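The guard in nfs_invalidate_folio() above only cancels writes when the
whole folio is being invalidated, which with large folios means comparing
against folio_size() rather than PAGE_SIZE. A standalone sketch of that
check (the helper name is made up for illustration):

    /* Illustration only, not part of the patch: the full-invalidation
     * test from nfs_invalidate_folio(), pulled out into a helper.
     * folio_size() is PAGE_SIZE for an order-0 folio and larger for a
     * multi-page folio, so a partial range never cancels writes. */
    static bool invalidation_covers_whole_folio(struct folio *folio,
                                                size_t offset, size_t length)
    {
            return offset == 0 && length >= folio_size(folio);
    }
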
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c

@@ -2049,21 +2049,21 @@ out:
 }
 EXPORT_SYMBOL_GPL(nfs_wb_all);
 
-int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
 {
         struct nfs_page *req;
         int ret = 0;
 
-        wait_on_page_writeback(page);
+        folio_wait_writeback(folio);
 
         /* blocking call to cancel all requests and join to a single (head)
          * request */
-        req = nfs_lock_and_join_requests(page);
+        req = nfs_lock_and_join_requests(&folio->page);
 
         if (IS_ERR(req)) {
                 ret = PTR_ERR(req);
         } else if (req) {
-                /* all requests from this page have been cancelled by
+                /* all requests from this folio have been cancelled by
                  * nfs_lock_and_join_requests, so just remove the head
                  * request from the inode / page_private pointer and
                  * release it */

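nfs_lock_and_join_requests() still takes a struct page at this point in
the conversion, so the folio is handed over as &folio->page; page_folio()
goes the other way. A sketch of that bridging pattern (the wrapper
function here is hypothetical, not something the patch adds):

    /* Hypothetical wrapper, for illustration: while an API still takes
     * struct page, a folio-based caller passes the head page with
     * &folio->page; page_folio() converts a page back to its folio. */
    static struct nfs_page *nfs_lock_and_join_folio_requests(struct folio *folio)
    {
            return nfs_lock_and_join_requests(&folio->page);
    }
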
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h

@@ -583,7 +583,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
 extern int nfs_sync_inode(struct inode *inode);
 extern int nfs_wb_all(struct inode *inode);
 extern int nfs_wb_page(struct inode *inode, struct page *page);
-extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
 extern void nfs_commit_free(struct nfs_commit_data *data);