NFS: Flesh out nfs_invalidate_page()
In the case of a call to truncate_inode_pages(), we should really try to cancel any pending writes on the page.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent: c04871e634
Commit: d2ccddf042
fs/nfs/file.c

@@ -303,7 +303,11 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
 {
-        /* FIXME: we really should cancel any unstarted writes on this page */
+        struct inode *inode = page->mapping->host;
+
+        /* Cancel any unstarted writes on this page */
+        if (offset == 0)
+                nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
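Editorial note, not part of the patch: in the 2.6-era address_space_operations API used here, ->invalidatepage(page, offset) is called with offset == 0 when the whole page is being thrown away, which is what truncate_inode_pages() does for fully-truncated pages; a partial invalidation passes a non-zero offset and the remaining data must still reach the server. The new function again, with editorial comments explaining the check:

static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
        struct inode *inode = page->mapping->host;

        /*
         * offset == 0: the entire page is being invalidated (e.g. by
         * truncate_inode_pages()), so any unstarted writes covering it
         * can be cancelled instead of flushed to the server.  A non-zero
         * offset means part of the page survives, so the pending writes
         * are left alone.
         */
        if (offset == 0)
                nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
}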
fs/nfs/pagelist.c

@@ -325,6 +325,7 @@ out:
 
 /**
  * nfs_scan_list - Scan a list for matching requests
+ * @nfsi: NFS inode
  * @head: One of the NFS inode request lists
  * @dst: Destination list
  * @idx_start: lower bound of page->index to scan

@@ -336,14 +337,15 @@ out:
  * The requests are *not* checked to ensure that they form a contiguous set.
  * You must be holding the inode's req_lock when calling this function
  */
-int
-nfs_scan_list(struct list_head *head, struct list_head *dst,
-              unsigned long idx_start, unsigned int npages)
+int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
+                struct list_head *dst, unsigned long idx_start,
+                unsigned int npages)
 {
-        struct list_head *pos, *tmp;
-        struct nfs_page *req;
-        unsigned long idx_end;
-        int res;
+        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
+        struct nfs_page *req;
+        unsigned long idx_end;
+        int found, i;
+        int res;
 
         res = 0;
         if (npages == 0)
@@ -351,21 +353,28 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
         else
                 idx_end = idx_start + npages - 1;
 
-        list_for_each_safe(pos, tmp, head) {
-
-                req = nfs_list_entry(pos);
-
-                if (req->wb_index < idx_start)
-                        continue;
-                if (req->wb_index > idx_end)
+        for (;;) {
+                found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
+                                (void **)&pgvec[0], idx_start,
+                                NFS_SCAN_MAXENTRIES);
+                if (found <= 0)
                         break;
-
-                if (!nfs_set_page_writeback_locked(req))
-                        continue;
-                nfs_list_remove_request(req);
-                nfs_list_add_request(req, dst);
-                res++;
+                for (i = 0; i < found; i++) {
+                        req = pgvec[i];
+                        if (req->wb_index > idx_end)
+                                goto out;
+                        idx_start = req->wb_index + 1;
+                        if (req->wb_list_head != head)
+                                continue;
+                        if (nfs_set_page_writeback_locked(req)) {
+                                nfs_list_remove_request(req);
+                                nfs_list_add_request(req, dst);
+                                res++;
+                        }
+                }
         }
+out:
         return res;
 }
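Editorial note, not part of the patch: the rewrite above replaces a full walk of the request list with batched lookups in the per-inode nfs_page_tree radix tree, so the scan can resume from where it left off instead of restarting. The same loop, with explanatory comments added:

        for (;;) {
                /* Fetch up to NFS_SCAN_MAXENTRIES requests at or after idx_start. */
                found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        /* Past the caller's range: the scan is finished. */
                        if (req->wb_index > idx_end)
                                goto out;
                        /* The next gang lookup resumes just after this request. */
                        idx_start = req->wb_index + 1;
                        /* The tree indexes all requests; only take those on 'head'. */
                        if (req->wb_list_head != head)
                                continue;
                        /* Claim the request for writeback; skip it if already claimed. */
                        if (nfs_set_page_writeback_locked(req)) {
                                nfs_list_remove_request(req);
                                nfs_list_add_request(req, dst);
                                res++;
                        }
                }
        }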
fs/nfs/write.c

@@ -579,6 +579,17 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un
         return ret;
 }
 
+static void nfs_cancel_requests(struct list_head *head)
+{
+        struct nfs_page *req;
+        while(!list_empty(head)) {
+                req = nfs_list_entry(head->next);
+                nfs_list_remove_request(req);
+                nfs_inode_remove_request(req);
+                nfs_clear_page_writeback(req);
+        }
+}
+
 /*
  * nfs_scan_dirty - Scan an inode for dirty requests
  * @inode: NFS inode to scan
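Editorial note, not part of the patch: nfs_cancel_requests() tears requests down instead of writing them out. With comments added, the intent of each call is:

static void nfs_cancel_requests(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                /* Detach from the temporary list built up by the scan. */
                nfs_list_remove_request(req);
                /* Drop the request from the inode without issuing any RPC. */
                nfs_inode_remove_request(req);
                /* Undo the writeback marking taken when the request was scanned. */
                nfs_clear_page_writeback(req);
        }
}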
@@ -623,7 +634,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st
         int res = 0;
 
         if (nfsi->ncommit != 0) {
-                res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
+                res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
                 nfsi->ncommit -= res;
                 if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
                         printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
@@ -1491,15 +1502,25 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
                 pages = nfs_scan_dirty(inode, &head, idx_start, npages);
                 if (pages != 0) {
                         spin_unlock(&nfsi->req_lock);
-                        ret = nfs_flush_list(inode, &head, pages, how);
+                        if (how & FLUSH_INVALIDATE)
+                                nfs_cancel_requests(&head);
+                        else
+                                ret = nfs_flush_list(inode, &head, pages, how);
                         spin_lock(&nfsi->req_lock);
                         continue;
                 }
                 if (nocommit)
                         break;
-                pages = nfs_scan_commit(inode, &head, 0, 0);
+                pages = nfs_scan_commit(inode, &head, idx_start, npages);
                 if (pages == 0)
                         break;
+                if (how & FLUSH_INVALIDATE) {
+                        spin_unlock(&nfsi->req_lock);
+                        nfs_cancel_requests(&head);
+                        spin_lock(&nfsi->req_lock);
+                        continue;
+                }
+                pages += nfs_scan_commit(inode, &head, 0, 0);
                 spin_unlock(&nfsi->req_lock);
                 ret = nfs_commit_list(inode, &head, how);
                 spin_lock(&nfsi->req_lock);
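Editorial sketch, not part of the patch (the helper name below is illustrative only), of how the pieces fit together when a whole page is truncated:

static void example_invalidate_whole_page(struct inode *inode, struct page *page)
{
        /* fs/nfs/file.c: ->invalidatepage with offset 0 ends up here. */
        nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);

        /*
         * Inside nfs_sync_inode_wait() with FLUSH_INVALIDATE set:
         *  - dirty requests found by nfs_scan_dirty() go to
         *    nfs_cancel_requests() instead of nfs_flush_list(), so no
         *    WRITE RPCs are sent for the truncated page;
         *  - requests on the commit list (now scanned with idx_start and
         *    npages rather than the whole file) are cancelled the same
         *    way, so no COMMIT is sent for them either.
         */
}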
include/linux/nfs_fs.h

@@ -61,6 +61,7 @@
 #define FLUSH_LOWPRI            8       /* low priority background flush */
 #define FLUSH_HIGHPRI           16      /* high priority memory reclaim flush */
 #define FLUSH_NOCOMMIT          32      /* Don't send the NFSv3/v4 COMMIT */
+#define FLUSH_INVALIDATE        64      /* Invalidate the page cache */
 
 #ifdef __KERNEL__
 
include/linux/nfs_page.h

@@ -63,8 +63,8 @@ extern void nfs_release_request(struct nfs_page *req);
 
 extern  int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
                              unsigned long idx_start, unsigned int npages);
-extern  int nfs_scan_list(struct list_head *, struct list_head *,
-                          unsigned long, unsigned int);
+extern  int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst,
+                          unsigned long idx_start, unsigned int npages);
 extern  int nfs_coalesce_requests(struct list_head *, struct list_head *,
                                   unsigned int);
 extern  int nfs_wait_on_request(struct nfs_page *);