NFS client fixes for 3.17
Highlights:
- More fixes for read/write codepath regressions
  - Sleeping while holding the inode lock
  - Stricter enforcement of page contiguity when coalescing requests
  - Fix up error handling in the page coalescing code
- Don't busy wait on SIGKILL in the file locking code

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJT+0LpAAoJEGcL54qWCgDyWfsP/imrpge47aZywi95chV8vgjM
O85ITZbupTFwXbB7kE63CrcaxRGhFrSStk4UDhDCDkHfFb1ksjZaPR1mnkwvkR2p
4+JUoq0fkPfeX21+rqKCYmnhstpne/N8K8FJBsEs3/TqiCBWxWOelLXdyWun4H5B
9JBYQ7FYitUazeSiSiDXcl7Di/E09cFPi0H5VPKRyuNdYxySabnsBOELBE/28iXr
egW1I9UKQR2EtBrvgazBbWE5XmB9XAm4X3sD1l0QD65mfSNkbnNhPFSiCdT7f/d6
9uxECR0Y4wNYgYAfVLBew5/MXJajcv03BFMKmTUeGj9fOQzycpBT4Dx2KxEWqfnt
Xk2nNbISxBnO0koMflmo+LPv2lv+Br3kQ+eZCHHKknvBrX2a6bJdTCZkwACVtND9
LdbAveFQpdaeLrm/28TnRoE927r+VeAVM19yOSG8sNAskFFg4Yy51tR0e1GivkJT
+qmmTRx+l78HjHvoPXOYdNgBC954r6APH5ST7su/7WxNClM36fEK6XxA9xbDLJWm
wUzlGKvpwEeBJJhgjbQLwuU8BiksjFz/CaiObNvPOpc/d2GoKIhnTg19kNhg2R//
UCDa2d5fep4z0Bo9p0s1KZm9pSBkkLjvRp9dm8WEIxLcdaF1jBK3dJECepm6ccvw
dmEmEfjbMudVdt/ZhapJ
=2wRt
-----END PGP SIGNATURE-----

Merge tag 'nfs-for-3.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client fixes from Trond Myklebust:
 "Highlights:
  - more fixes for read/write codepath regressions
    * sleeping while holding the inode lock
    * stricter enforcement of page contiguity when coalescing requests
    * fix up error handling in the page coalescing code
  - don't busy wait on SIGKILL in the file locking code"

* tag 'nfs-for-3.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  nfs: Don't busy-wait on SIGKILL in __nfs_iocounter_wait
  nfs: can_coalesce_requests must enforce contiguity
  nfs: disallow duplicate pages in pgio page vectors
  nfs: don't sleep with inode lock in lock_and_join_requests
  nfs: fix error handling in lock_and_join_requests
  nfs: use blocking page_group_lock in add_request
  nfs: fix nonblocking calls to nfs_page_group_lock
  nfs: change nfs_page_group_lock argument
Commit f01bfc977e
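The first item in the series, "nfs: Don't busy-wait on SIGKILL in __nfs_iocounter_wait", fixes a loop that only re-checked io_count: once a fatal signal is pending, nfs_wait_bit_killable() returns an error immediately instead of sleeping, so the old loop spun until the outstanding I/O happened to drain. Below is a minimal standalone userspace model of that loop shape, not kernel code; wait_killable(), io_count, fatal_signal and the -4 error value are illustrative stand-ins for nfs_wait_bit_killable(), the io counter, a pending SIGKILL and -ERESTARTSYS.

/* Model of the __nfs_iocounter_wait fix: the loop condition must also
 * check the killable wait's return value, otherwise it busy-waits once
 * a fatal signal is pending. */
#include <stdio.h>

static int io_count = 3;	/* outstanding I/O that never completes here */
static int fatal_signal = 1;	/* SIGKILL already delivered */

/* stands in for nfs_wait_bit_killable(): fails immediately (no sleep)
 * while a fatal signal is pending, otherwise returns 0 after sleeping */
static int wait_killable(void)
{
	return fatal_signal ? -4 : 0;
}

int main(void)
{
	int ret = 0;
	long iterations = 0;

	do {
		if (io_count == 0)
			break;
		ret = wait_killable();
		iterations++;
	} while (io_count != 0 && !ret);	/* old condition checked only io_count -> spins forever */

	printf("left the loop after %ld iteration(s), ret=%d\n", iterations, ret);
	return 0;
}

Compiled as an ordinary C program this exits after one iteration; dropping the "&& !ret" reproduces the endless spin the patch removes.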
fs/nfs/pagelist.c:

@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
 		if (atomic_read(&c->io_count) == 0)
 			break;
 		ret = nfs_wait_bit_killable(&q.key);
-	} while (atomic_read(&c->io_count) != 0);
+	} while (atomic_read(&c->io_count) != 0 && !ret);
 	finish_wait(wq, &q.wait);
 	return ret;
 }

@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
+ * @nonblock - if true don't block waiting for lock
  *
  * this lock must be held if modifying the page group list
  *
- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
+ * return 0 on success, < 0 on error: -EDELAY if nonblocking or the
+ * result from wait_on_bit_lock
+ *
+ * NOTE: calling with nonblock=false should always have set the
+ *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
+ *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool wait)
+nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 {
 	struct nfs_page *head = req->wb_head;
-	int ret;
 
 	WARN_ON_ONCE(head != head->wb_head);
 
-	do {
-		ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-			TASK_UNINTERRUPTIBLE);
-	} while (wait && ret != 0);
+	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+		return 0;
 
-	WARN_ON_ONCE(ret > 0);
-	return ret;
+	if (!nonblock)
+		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+				TASK_UNINTERRUPTIBLE);
+
+	return -EAGAIN;
 }
 
+/*
+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
+ * @req - a request in the group
+ *
+ * This is a blocking call to wait for the group lock to be cleared.
+ */
+void
+nfs_page_group_lock_wait(struct nfs_page *req)
+{
+	struct nfs_page *head = req->wb_head;
+
+	WARN_ON_ONCE(head != head->wb_head);
+
+	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
+		TASK_UNINTERRUPTIBLE);
+}
+
 /*

@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
 	bool ret;
 
-	nfs_page_group_lock(req, true);
+	nfs_page_group_lock(req, false);
 	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 	nfs_page_group_unlock(req);
 

@@ -701,10 +724,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 		     struct nfs_pgio_header *hdr)
 {
 	struct nfs_page *req;
-	struct page **pages;
+	struct page **pages,
+		     *last_page;
 	struct list_head *head = &desc->pg_list;
 	struct nfs_commit_info cinfo;
-	unsigned int pagecount;
+	unsigned int pagecount, pageused;
 
 	pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
 	if (!nfs_pgarray_set(&hdr->page_array, pagecount))

@@ -712,12 +736,23 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 
 	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 	pages = hdr->page_array.pagevec;
+	last_page = NULL;
+	pageused = 0;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, &hdr->pages);
-		*pages++ = req->wb_page;
+
+		if (WARN_ON_ONCE(pageused >= pagecount))
+			return nfs_pgio_error(desc, hdr);
+
+		if (!last_page || last_page != req->wb_page) {
+			*pages++ = last_page = req->wb_page;
+			pageused++;
+		}
 	}
+	if (WARN_ON_ONCE(pageused != pagecount))
+		return nfs_pgio_error(desc, hdr);
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))

@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 			return false;
 		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
 			return false;
+		if (req->wb_page == prev->wb_page) {
+			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
+				return false;
+		} else {
+			if (req->wb_pgbase != 0 ||
+			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+				return false;
+		}
 	}
 	size = pgio->pg_ops->pg_test(pgio, prev, req);
 	WARN_ON_ONCE(size > req->wb_bytes);

@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	struct nfs_page *subreq;
 	unsigned int bytes_left = 0;
 	unsigned int offset, pgbase;
-	int ret;
 
-	ret = nfs_page_group_lock(req, false);
-	if (ret < 0) {
-		desc->pg_error = ret;
-		return 0;
-	}
+	nfs_page_group_lock(req, false);
 
 	subreq = req;
 	bytes_left = subreq->wb_bytes;

@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 			if (desc->pg_recoalesce)
 				return 0;
 			/* retry add_request for this subreq */
-			ret = nfs_page_group_lock(req, false);
-			if (ret < 0) {
-				desc->pg_error = ret;
-				return 0;
-			}
+			nfs_page_group_lock(req, false);
 			continue;
 		}
 
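The nfs_can_coalesce_requests() hunk above adds the stricter contiguity checks named in the changelog: requests that share a page must be byte-contiguous within it, and requests on different pages may only coalesce when the previous request ends exactly at a page boundary and the new one starts at offset 0. A minimal standalone model of just those added checks follows; it is not the kernel code, model_req/page_id/MODEL_PAGE_SIZE are illustrative stand-ins for struct nfs_page fields and PAGE_CACHE_SIZE, and the pre-existing req_offset() check is not modeled.

/* Model of the per-page contiguity test added to nfs_can_coalesce_requests() */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u		/* stands in for PAGE_CACHE_SIZE */

struct model_req {
	int page_id;			/* stands in for req->wb_page */
	unsigned int pgbase;		/* offset of the request within its page */
	unsigned int bytes;		/* length of the request */
};

static bool can_coalesce(const struct model_req *prev, const struct model_req *req)
{
	if (req->page_id == prev->page_id) {
		/* same page: the new request must start where the previous ended */
		if (req->pgbase != prev->pgbase + prev->bytes)
			return false;
	} else {
		/* new page: the previous request must end at the page boundary
		 * and the new one must start at offset 0 */
		if (req->pgbase != 0 ||
		    prev->pgbase + prev->bytes != MODEL_PAGE_SIZE)
			return false;
	}
	return true;
}

int main(void)
{
	struct model_req a = { .page_id = 1, .pgbase = 0,    .bytes = 2048 };
	struct model_req b = { .page_id = 1, .pgbase = 2048, .bytes = 2048 };
	struct model_req c = { .page_id = 2, .pgbase = 0,    .bytes = 1024 };
	struct model_req d = { .page_id = 3, .pgbase = 512,  .bytes = 512  };

	printf("a,b -> %d\n", can_coalesce(&a, &b));	/* 1: contiguous within one page */
	printf("b,c -> %d\n", can_coalesce(&b, &c));	/* 1: b ends its page, c starts a new one */
	printf("c,d -> %d\n", can_coalesce(&c, &d));	/* 0: c does not fill its page and d starts mid-page */
	return 0;
}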
fs/nfs/write.c:

@@ -241,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
 	unsigned int pos = 0;
 	unsigned int len = nfs_page_length(req->wb_page);
 
-	nfs_page_group_lock(req, true);
+	nfs_page_group_lock(req, false);
 
 	do {
 		tmp = nfs_page_group_search_locked(req->wb_head, pos);

@@ -478,10 +478,23 @@ try_again:
 		return NULL;
 	}
 
-	/* lock each request in the page group */
-	ret = nfs_page_group_lock(head, false);
-	if (ret < 0)
+	/* holding inode lock, so always make a non-blocking call to try the
+	 * page group lock */
+	ret = nfs_page_group_lock(head, true);
+	if (ret < 0) {
+		spin_unlock(&inode->i_lock);
+
+		if (!nonblock && ret == -EAGAIN) {
+			nfs_page_group_lock_wait(head);
+			nfs_release_request(head);
+			goto try_again;
+		}
+
+		nfs_release_request(head);
 		return ERR_PTR(ret);
+	}
 
+	/* lock each request in the page group */
 	subreq = head;
 	do {
 		/*
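The nfs_lock_and_join_requests() change above implements the "don't sleep with inode lock" fix: while inode->i_lock is held the group lock is only tried non-blockingly, and on -EAGAIN the spinlock is dropped, nfs_page_group_lock_wait() sleeps until the lock is released, and the whole sequence is retried from try_again. A rough userspace pthreads model of that shape, not kernel code: the spinlock/mutex pair and lock_and_join() are illustrative stand-ins, and locking-then-unlocking the mutex merely plays the role of waiting for the current holder to drop the group lock.

/* Model of "try under spinlock, on contention drop the spinlock,
 * wait, and retry" -- the pattern adopted by lock_and_join_requests. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t inode_lock;	/* stands in for inode->i_lock */
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for PG_HEADLOCK */

static void lock_and_join(void)
{
retry:
	pthread_spin_lock(&inode_lock);

	/* non-blocking attempt only: sleeping under the spinlock is forbidden */
	if (pthread_mutex_trylock(&group_lock) == EBUSY) {
		pthread_spin_unlock(&inode_lock);

		/* now it is safe to block; this stands in for
		 * nfs_page_group_lock_wait() followed by "goto try_again" */
		pthread_mutex_lock(&group_lock);
		pthread_mutex_unlock(&group_lock);
		goto retry;
	}

	/* ... join/modify the page group under both locks ... */
	pthread_mutex_unlock(&group_lock);
	pthread_spin_unlock(&inode_lock);
}

int main(void)
{
	pthread_spin_init(&inode_lock, PTHREAD_PROCESS_PRIVATE);
	lock_and_join();
	puts("page group handled without sleeping under the spinlock");
	pthread_spin_destroy(&inode_lock);
	return 0;
}

Build with -pthread; in a single-threaded run the trylock simply succeeds, and the retry path only matters under contention.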
include/linux/nfs_page.h:

@@ -123,6 +123,7 @@ extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);