From 6d17d653c9f152e113043d00f3bcf00c0eb5f5a2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 9 Jul 2017 13:45:27 -0400 Subject: [PATCH 01/31] NFS: Simplify page writeback We don't expect the page header lock to ever be held across I/O, so it should always be safe to wait for it, even if we're doing nonblocking writebacks. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b1af5dee5e0a..1d447e37f472 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -366,7 +366,6 @@ nfs_page_group_clear_bits(struct nfs_page *req) * @inode - inode associated with request page group, must be holding inode lock * @head - head request of page group, must be holding head lock * @req - request that couldn't lock and needs to wait on the req bit lock - * @nonblock - if true, don't actually wait * * NOTE: this must be called holding page_group bit lock and inode spin lock * and BOTH will be released before returning. @@ -375,7 +374,7 @@ nfs_page_group_clear_bits(struct nfs_page *req) */ static int nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, - struct nfs_page *req, bool nonblock) + struct nfs_page *req) __releases(&inode->i_lock) { struct nfs_page *tmp; @@ -396,10 +395,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, /* release ref from nfs_page_find_head_request_locked */ nfs_release_request(head); - if (!nonblock) - ret = nfs_wait_on_request(req); - else - ret = -EAGAIN; + ret = nfs_wait_on_request(req); nfs_release_request(req); return ret; @@ -464,7 +460,6 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, * operations for this page. * * @page - the page used to lookup the "page group" of nfs_page structures - * @nonblock - if true, don't block waiting for request locks * * This function joins all sub requests to the head request by first * locking all requests in the group, cancelling any pending operations @@ -478,7 +473,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, * error was encountered. */ static struct nfs_page * -nfs_lock_and_join_requests(struct page *page, bool nonblock) +nfs_lock_and_join_requests(struct page *page) { struct inode *inode = page_file_mapping(page)->host; struct nfs_page *head, *subreq; @@ -511,14 +506,9 @@ try_again: if (ret < 0) { spin_unlock(&inode->i_lock); - if (!nonblock && ret == -EAGAIN) { - nfs_page_group_lock_wait(head); - nfs_release_request(head); - goto try_again; - } - + nfs_page_group_lock_wait(head); nfs_release_request(head); - return ERR_PTR(ret); + goto try_again; } /* lock each request in the page group */ @@ -543,7 +533,7 @@ try_again: /* releases page group bit lock and * inode spin lock and all references */ ret = nfs_unroll_locks_and_wait(inode, head, - subreq, nonblock); + subreq); if (ret == 0) goto try_again; @@ -624,12 +614,12 @@ nfs_error_is_fatal_on_server(int err) * May return an error if the user signalled nfs_wait_on_request(). 
*/ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, - struct page *page, bool nonblock) + struct page *page) { struct nfs_page *req; int ret = 0; - req = nfs_lock_and_join_requests(page, nonblock); + req = nfs_lock_and_join_requests(page); if (!req) goto out; ret = PTR_ERR(req); @@ -672,7 +662,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, int ret; nfs_pageio_cond_complete(pgio, page_index(page)); - ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); + ret = nfs_page_async_flush(pgio, page); if (ret == -EAGAIN) { redirty_page_for_writepage(wbc, page); ret = 0; @@ -2015,7 +2005,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) /* blocking call to cancel all requests and join to a single (head) * request */ - req = nfs_lock_and_join_requests(page, false); + req = nfs_lock_and_join_requests(page); if (IS_ERR(req)) { ret = PTR_ERR(req); From 82749dd4efcec8e90fa7769eec3dd0afa2e3396a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 09:30:13 -0400 Subject: [PATCH 02/31] NFS: Reduce lock contention in nfs_page_find_head_request() Add a lockless check for whether or not the page might be carrying an existing writeback before we grab the inode->i_lock. Reported-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 1d447e37f472..06e150c4e315 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -190,9 +190,11 @@ static struct nfs_page *nfs_page_find_head_request(struct page *page) struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req = NULL; - spin_lock(&inode->i_lock); - req = nfs_page_find_head_request_locked(NFS_I(inode), page); - spin_unlock(&inode->i_lock); + if (PagePrivate(page) || PageSwapCache(page)) { + spin_lock(&inode->i_lock); + req = nfs_page_find_head_request_locked(NFS_I(inode), page); + spin_unlock(&inode->i_lock); + } return req; } From 1403390d8366c717139cab26b8e94d943915fa12 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 09:37:12 -0400 Subject: [PATCH 03/31] NFS: Reduce lock contention in nfs_try_to_update_request() Micro-optimisation to move the lockless check into the for(;;) loop. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 06e150c4e315..bb019096c331 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1097,13 +1097,12 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, unsigned int end; int error; - if (!PagePrivate(page)) - return NULL; - end = offset + bytes; - spin_lock(&inode->i_lock); for (;;) { + if (!(PagePrivate(page) || PageSwapCache(page))) + return NULL; + spin_lock(&inode->i_lock); req = nfs_page_find_head_request_locked(NFS_I(inode), page); if (req == NULL) goto out_unlock; @@ -1132,7 +1131,6 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, nfs_release_request(req); if (error != 0) goto out_err; - spin_lock(&inode->i_lock); } /* Okay, the request matches. 
Update the region */ From 08fead2ae5a9953d47677416cc5f6bcae448480d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 18 Jul 2017 19:31:10 -0400 Subject: [PATCH 04/31] NFS: Ensure we always dereference the page head last This fixes a race with nfs_page_group_sync_on_bit() whereby the call to wake_up_bit() in nfs_page_group_unlock() could occur after the page header had been freed. Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index de9066a92c0d..a6f2bbd709ba 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -306,14 +306,11 @@ static void nfs_page_group_destroy(struct kref *kref) { struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); + struct nfs_page *head = req->wb_head; struct nfs_page *tmp, *next; - /* subrequests must release the ref on the head request */ - if (req->wb_head != req) - nfs_release_request(req->wb_head); - if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) - return; + goto out; tmp = req; do { @@ -324,6 +321,10 @@ nfs_page_group_destroy(struct kref *kref) nfs_free_request(tmp); tmp = next; } while (tmp != req); +out: + /* subrequests must release the ref on the head request */ + if (head != req) + nfs_release_request(head); } /** From 7cb9cd9aa2eafe869935d4168031f1ed376d924c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 10:11:11 -0400 Subject: [PATCH 05/31] NFS: Fix a reference and lock leak in nfs_lock_and_join_requests() Yes, this is a situation that should never happen (hence the WARN_ON) but we should still ensure that we free up the locks and references to the faulty pages. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bb019096c331..1ca759719429 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -526,8 +526,7 @@ try_again: } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || ((subreq->wb_offset + subreq->wb_bytes) > (head->wb_offset + total_bytes)))) { - nfs_page_group_unlock(head); - spin_unlock(&inode->i_lock); + nfs_unroll_locks_and_wait(inode, head, subreq); return ERR_PTR(-EIO); } From a0e265bc78010d2d831a968d4cea3c40a0efac8b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 10:29:32 -0400 Subject: [PATCH 06/31] NFS: Fix an ABBA issue in nfs_lock_and_join_requests() All other callers of nfs_page_group_lock() appear to already hold the page lock on the head page, so doing it in the opposite order here is inefficient, although not deadlock prone since we roll back all locks on contention. 
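The ordering this patch moves to can be summarised with a rough sketch. All of the helpers used here appear in the diff below; the error paths and reference-count handling are trimmed, so read it as an illustration of the lock order rather than the finished function:

	/* nfs_lock_and_join_requests(), simplified: lock the head request
	 * first, and only then try for the page group bit lock. */
	if (!nfs_lock_request(head)) {
		/* head is contended: drop the inode lock, wait, retry */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		goto try_again;
	}
	/* still holding the inode lock, so the group lock is tried
	 * non-blocking; on contention we back out and retry */
	ret = nfs_page_group_lock(head, true);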
Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 1ca759719429..c940e615f5dc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -383,7 +383,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, int ret; /* relinquish all the locks successfully grabbed this run */ - for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) + for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) nfs_unlock_request(tmp); WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); @@ -395,7 +395,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, spin_unlock(&inode->i_lock); /* release ref from nfs_page_find_head_request_locked */ - nfs_release_request(head); + nfs_unlock_and_release_request(head); ret = nfs_wait_on_request(req); nfs_release_request(req); @@ -484,10 +484,6 @@ nfs_lock_and_join_requests(struct page *page) int ret; try_again: - total_bytes = 0; - - WARN_ON_ONCE(destroy_list); - spin_lock(&inode->i_lock); /* @@ -502,6 +498,16 @@ try_again: return NULL; } + /* lock the page head first in order to avoid an ABBA inefficiency */ + if (!nfs_lock_request(head)) { + spin_unlock(&inode->i_lock); + ret = nfs_wait_on_request(head); + nfs_release_request(head); + if (ret < 0) + return ERR_PTR(ret); + goto try_again; + } + /* holding inode lock, so always make a non-blocking call to try the * page group lock */ ret = nfs_page_group_lock(head, true); @@ -509,13 +515,14 @@ try_again: spin_unlock(&inode->i_lock); nfs_page_group_lock_wait(head); - nfs_release_request(head); + nfs_unlock_and_release_request(head); goto try_again; } /* lock each request in the page group */ - subreq = head; - do { + total_bytes = head->wb_bytes; + for (subreq = head->wb_this_page; subreq != head; + subreq = subreq->wb_this_page) { /* * Subrequests are always contiguous, non overlapping * and in order - but may be repeated (mirrored writes). @@ -541,9 +548,7 @@ try_again: return ERR_PTR(ret); } - - subreq = subreq->wb_this_page; - } while (subreq != head); + } /* Now that all requests are locked, make sure they aren't on any list. * Commit list removal accounting is done after locks are dropped */ From e14bebf6de11a4b8476cf2b0a75bf7c3e69112d5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 11:11:49 -0400 Subject: [PATCH 07/31] NFS: Don't check request offset and size without holding a lock Request offsets and sizes are not guaranteed to be stable unless you are holding the request locked. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index c940e615f5dc..84b6818e5278 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -523,20 +523,6 @@ try_again: total_bytes = head->wb_bytes; for (subreq = head->wb_this_page; subreq != head; subreq = subreq->wb_this_page) { - /* - * Subrequests are always contiguous, non overlapping - * and in order - but may be repeated (mirrored writes). 
- */ - if (subreq->wb_offset == (head->wb_offset + total_bytes)) { - /* keep track of how many bytes this group covers */ - total_bytes += subreq->wb_bytes; - } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || - ((subreq->wb_offset + subreq->wb_bytes) > - (head->wb_offset + total_bytes)))) { - nfs_unroll_locks_and_wait(inode, head, subreq); - return ERR_PTR(-EIO); - } - if (!nfs_lock_request(subreq)) { /* releases page group bit lock and * inode spin lock and all references */ @@ -548,6 +534,20 @@ try_again: return ERR_PTR(ret); } + /* + * Subrequests are always contiguous, non overlapping + * and in order - but may be repeated (mirrored writes). + */ + if (subreq->wb_offset == (head->wb_offset + total_bytes)) { + /* keep track of how many bytes this group covers */ + total_bytes += subreq->wb_bytes; + } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || + ((subreq->wb_offset + subreq->wb_bytes) > + (head->wb_offset + total_bytes)))) { + nfs_unlock_request(subreq); + nfs_unroll_locks_and_wait(inode, head, subreq); + return ERR_PTR(-EIO); + } } /* Now that all requests are locked, make sure they aren't on any list. From 31a01f093edbc687e783a4c8adcd76d3cc91a559 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 18 Jul 2017 19:18:49 -0400 Subject: [PATCH 08/31] NFS: Don't unlock writebacks before declaring PG_WB_END We don't want nfs_lock_and_join_requests() to start fiddling with the request before the call to nfs_page_group_sync_on_bit(). Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 84b6818e5278..bb38c881fc48 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -335,8 +335,11 @@ static void nfs_end_page_writeback(struct nfs_page *req) { struct inode *inode = page_file_mapping(req->wb_page)->host; struct nfs_server *nfss = NFS_SERVER(inode); + bool is_done; - if (!nfs_page_group_sync_on_bit(req, PG_WB_END)) + is_done = nfs_page_group_sync_on_bit(req, PG_WB_END); + nfs_unlock_request(req); + if (!is_done) return; end_page_writeback(req->wb_page); @@ -596,7 +599,6 @@ try_again: static void nfs_write_error_remove_page(struct nfs_page *req) { - nfs_unlock_request(req); nfs_end_page_writeback(req); generic_error_remove_page(page_file_mapping(req->wb_page), req->wb_page); @@ -1019,7 +1021,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) remove_req: nfs_inode_remove_request(req); next: - nfs_unlock_request(req); nfs_end_page_writeback(req); nfs_release_request(req); } @@ -1406,7 +1407,6 @@ static void nfs_redirty_request(struct nfs_page *req) { nfs_mark_request_dirty(req); set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); - nfs_unlock_request(req); nfs_end_page_writeback(req); nfs_release_request(req); } From b66aaa8dfeda7b5c7df513cf3b36e1290fa84055 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 18 Jul 2017 15:22:12 -0400 Subject: [PATCH 09/31] NFS: Fix the inode request accounting when pages have subrequests Both nfs_destroy_unlinked_subrequests() and nfs_lock_and_join_requests() manipulate the inode flags adjusting the NFS_I(inode)->nrequests. 
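Put differently: whenever a request gains or gives up its PG_INODE_REF reference, NFS_I(inode)->nrequests has to move in step. The two halves of that invariant, lifted almost verbatim from the diff below (at this point in the series the counter is still protected by the inode spin lock; a later patch in this series converts it to an atomic_long_t):

	/* a subrequest drops its inode reference */
	if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
		nfs_release_request(subreq);
		spin_lock(&inode->i_lock);
		NFS_I(inode)->nrequests--;
		spin_unlock(&inode->i_lock);
	}

	/* the head request takes over the reference when PG_REMOVE is
	 * cleared (the caller already holds the inode lock here) */
	if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
		set_bit(PG_INODE_REF, &head->wb_flags);
		kref_get(&head->wb_kref);
		NFS_I(inode)->nrequests++;
	}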
Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bb38c881fc48..ee981353d4aa 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -418,7 +418,8 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, */ static void nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, - struct nfs_page *old_head) + struct nfs_page *old_head, + struct inode *inode) { while (destroy_list) { struct nfs_page *subreq = destroy_list; @@ -443,9 +444,12 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, nfs_page_group_clear_bits(subreq); /* release the PG_INODE_REF reference */ - if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { nfs_release_request(subreq); - else + spin_lock(&inode->i_lock); + NFS_I(inode)->nrequests--; + spin_unlock(&inode->i_lock); + } else WARN_ON_ONCE(1); } else { WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags)); @@ -572,25 +576,24 @@ try_again: head->wb_bytes = total_bytes; } + /* Postpone destruction of this request */ + if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { + set_bit(PG_INODE_REF, &head->wb_flags); + kref_get(&head->wb_kref); + NFS_I(inode)->nrequests++; + } + /* * prepare head request to be added to new pgio descriptor */ nfs_page_group_clear_bits(head); - /* - * some part of the group was still on the inode list - otherwise - * the group wouldn't be involved in async write. - * grab a reference for the head request, iff it needs one. - */ - if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags)) - kref_get(&head->wb_kref); - nfs_page_group_unlock(head); /* drop lock to clean uprequests on destroy list */ spin_unlock(&inode->i_lock); - nfs_destroy_unlinked_subrequests(destroy_list, head); + nfs_destroy_unlinked_subrequests(destroy_list, head, inode); /* still holds ref on head from nfs_page_find_head_request_locked * and still has lock on head from lock loop */ From f6032f216fca8a1fa7f43a652f26cdf633183745 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 19:50:23 -0400 Subject: [PATCH 10/31] NFS: Teach nfs_try_to_update_request() to deal with request page_groups Simplify the code, and avoid some flushes to disk. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 60 +++++++++++++++++--------------------------------- 1 file changed, 20 insertions(+), 40 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ee981353d4aa..0b4d1ef168e0 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1107,39 +1107,19 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, end = offset + bytes; - for (;;) { - if (!(PagePrivate(page) || PageSwapCache(page))) - return NULL; - spin_lock(&inode->i_lock); - req = nfs_page_find_head_request_locked(NFS_I(inode), page); - if (req == NULL) - goto out_unlock; + req = nfs_lock_and_join_requests(page); + if (IS_ERR_OR_NULL(req)) + return req; - /* should be handled by nfs_flush_incompatible */ - WARN_ON_ONCE(req->wb_head != req); - WARN_ON_ONCE(req->wb_this_page != req); - - rqend = req->wb_offset + req->wb_bytes; - /* - * Tell the caller to flush out the request if - * the offsets are non-contiguous. - * Note: nfs_flush_incompatible() will already - * have flushed out requests having wrong owners. 
- */ - if (offset > rqend - || end < req->wb_offset) - goto out_flushme; - - if (nfs_lock_request(req)) - break; - - /* The request is locked, so wait and then retry */ - spin_unlock(&inode->i_lock); - error = nfs_wait_on_request(req); - nfs_release_request(req); - if (error != 0) - goto out_err; - } + rqend = req->wb_offset + req->wb_bytes; + /* + * Tell the caller to flush out the request if + * the offsets are non-contiguous. + * Note: nfs_flush_incompatible() will already + * have flushed out requests having wrong owners. + */ + if (offset > rqend || end < req->wb_offset) + goto out_flushme; /* Okay, the request matches. Update the region */ if (offset < req->wb_offset) { @@ -1150,17 +1130,17 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, req->wb_bytes = end - req->wb_offset; else req->wb_bytes = rqend - req->wb_offset; -out_unlock: - if (req) - nfs_clear_request_commit(req); - spin_unlock(&inode->i_lock); return req; out_flushme: - spin_unlock(&inode->i_lock); - nfs_release_request(req); + /* + * Note: we mark the request dirty here because + * nfs_lock_and_join_requests() cannot preserve + * commit flags, so we have to replay the write. + */ + nfs_mark_request_dirty(req); + nfs_unlock_and_release_request(req); error = nfs_wb_page(inode, page); -out_err: - return ERR_PTR(error); + return (error < 0) ? ERR_PTR(error) : NULL; } /* From 7e6cca6caf7230b049bd681c5400b01c365ee452 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 20:00:46 -0400 Subject: [PATCH 11/31] NFS: Remove page group limit in nfs_flush_incompatible() nfs_try_to_update_request() should be able to cope now. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 0b4d1ef168e0..08c1ce968cce 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1205,8 +1205,6 @@ int nfs_flush_incompatible(struct file *file, struct page *page) l_ctx = req->wb_lock_context; do_flush = req->wb_page != page || !nfs_match_open_context(req->wb_context, ctx); - /* for now, flush if more than 1 request in page_group */ - do_flush |= req->wb_this_page != req; if (l_ctx && flctx && !(list_empty_careful(&flctx->flc_posix) && list_empty_careful(&flctx->flc_flock))) { From b5bab9bf91324a7fe21b365d6966cfd087d08e3a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 10:34:21 -0400 Subject: [PATCH 12/31] NFS: Reduce inode->i_lock contention in nfs_lock_and_join_requests() We should no longer need the inode->i_lock, now that we've straightened out the request locking. The locking schema is now: 1) Lock page head request 2) Lock the page group 3) Lock the subrequests one by one Note that there is a subtle race with nfs_inode_remove_request() due to the fact that the latter does not lock the page head, when removing it from the struct page. Only the last subrequest is locked, hence we need to re-check that the PagePrivate(page) is still set after we've locked all the subrequests. 
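The schema above, condensed into a single sketch. Every helper named here appears elsewhere in this series; reference counting, the subrequest merge logic and most error paths are left out, so this is a map of the locking order, not working code:

	/* 1) lock the page head request */
	if (!nfs_lock_request(head)) {
		/* drop locks, nfs_wait_on_request(head), and retry */
	}

	/* 2) lock the page group */
	ret = nfs_page_group_lock(head, false);

	/* 3) lock the subrequests one by one */
	for (subreq = head->wb_this_page; subreq != head;
	     subreq = subreq->wb_this_page)
		nfs_lock_request(subreq);

	/* finally, re-check for a lost race with nfs_inode_remove_request():
	 * only the last subrequest is locked when the head is torn down, so
	 * PagePrivate() may have been cleared behind our back */
	if (!(PagePrivate(page) || PageSwapCache(page))) {
		nfs_unlock_and_release_request(head);
		return NULL;
	}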
Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 08c1ce968cce..ff7c90c7ff79 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -372,15 +372,14 @@ nfs_page_group_clear_bits(struct nfs_page *req) * @head - head request of page group, must be holding head lock * @req - request that couldn't lock and needs to wait on the req bit lock * - * NOTE: this must be called holding page_group bit lock and inode spin lock - * and BOTH will be released before returning. + * NOTE: this must be called holding page_group bit lock + * which will be released before returning. * * returns 0 on success, < 0 on error. */ static int nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, struct nfs_page *req) - __releases(&inode->i_lock) { struct nfs_page *tmp; int ret; @@ -395,7 +394,6 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, kref_get(&req->wb_kref); nfs_page_group_unlock(head); - spin_unlock(&inode->i_lock); /* release ref from nfs_page_find_head_request_locked */ nfs_unlock_and_release_request(head); @@ -491,8 +489,9 @@ nfs_lock_and_join_requests(struct page *page) int ret; try_again: + if (!(PagePrivate(page) || PageSwapCache(page))) + return NULL; spin_lock(&inode->i_lock); - /* * A reference is taken only on the head request which acts as a * reference to the whole page group - the group will not be destroyed @@ -514,16 +513,12 @@ try_again: return ERR_PTR(ret); goto try_again; } + spin_unlock(&inode->i_lock); - /* holding inode lock, so always make a non-blocking call to try the - * page group lock */ - ret = nfs_page_group_lock(head, true); + ret = nfs_page_group_lock(head, false); if (ret < 0) { - spin_unlock(&inode->i_lock); - - nfs_page_group_lock_wait(head); nfs_unlock_and_release_request(head); - goto try_again; + return ERR_PTR(ret); } /* lock each request in the page group */ @@ -531,8 +526,10 @@ try_again: for (subreq = head->wb_this_page; subreq != head; subreq = subreq->wb_this_page) { if (!nfs_lock_request(subreq)) { - /* releases page group bit lock and - * inode spin lock and all references */ + /* + * releases page group bit lock and + * page locks and all references + */ ret = nfs_unroll_locks_and_wait(inode, head, subreq); @@ -580,7 +577,9 @@ try_again: if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { set_bit(PG_INODE_REF, &head->wb_flags); kref_get(&head->wb_kref); + spin_lock(&inode->i_lock); NFS_I(inode)->nrequests++; + spin_unlock(&inode->i_lock); } /* @@ -590,11 +589,14 @@ try_again: nfs_page_group_unlock(head); - /* drop lock to clean uprequests on destroy list */ - spin_unlock(&inode->i_lock); - nfs_destroy_unlinked_subrequests(destroy_list, head, inode); + /* Did we lose a race with nfs_inode_remove_request()? 
*/ + if (!(PagePrivate(page) || PageSwapCache(page))) { + nfs_unlock_and_release_request(head); + return NULL; + } + /* still holds ref on head from nfs_page_find_head_request_locked * and still has lock on head from lock loop */ return head; @@ -968,7 +970,7 @@ nfs_clear_page_commit(struct page *page) WB_RECLAIMABLE); } -/* Called holding inode (/cinfo) lock */ +/* Called holding the request lock on @req */ static void nfs_clear_request_commit(struct nfs_page *req) { @@ -977,9 +979,11 @@ nfs_clear_request_commit(struct nfs_page *req) struct nfs_commit_info cinfo; nfs_init_cinfo_from_inode(&cinfo, inode); + spin_lock(&inode->i_lock); if (!pnfs_clear_request_commit(req, &cinfo)) { nfs_request_remove_commit_list(req, &cinfo); } + spin_unlock(&inode->i_lock); nfs_clear_page_commit(req->wb_page); } } From 74a6d4b5ae4ec7e93c72a92decb2f8c16c812416 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 19 Jul 2017 08:23:10 -0400 Subject: [PATCH 13/31] NFS: Further optimise nfs_lock_and_join_requests() When locking the entire group in order to remove subrequests, the locks are always taken in order, and with the page group lock being taken after the page head is locked. The intention is that: 1) The lock on the group head guarantees that requests may not be removed from the group (although new entries could be appended if we're not holding the group lock). 2) It is safe to drop and retake the page group lock while iterating through the list, in particular when waiting for a subrequest lock. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 45 ++++++++++++++++++--------------------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ff7c90c7ff79..1ee5d89380d9 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -377,31 +377,17 @@ nfs_page_group_clear_bits(struct nfs_page *req) * * returns 0 on success, < 0 on error. 
*/ -static int -nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, +static void +nfs_unroll_locks(struct inode *inode, struct nfs_page *head, struct nfs_page *req) { struct nfs_page *tmp; - int ret; /* relinquish all the locks successfully grabbed this run */ for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) nfs_unlock_request(tmp); WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); - - /* grab a ref on the request that will be waited on */ - kref_get(&req->wb_kref); - - nfs_page_group_unlock(head); - - /* release ref from nfs_page_find_head_request_locked */ - nfs_unlock_and_release_request(head); - - ret = nfs_wait_on_request(req); - nfs_release_request(req); - - return ret; } /* @@ -525,18 +511,21 @@ try_again: total_bytes = head->wb_bytes; for (subreq = head->wb_this_page; subreq != head; subreq = subreq->wb_this_page) { - if (!nfs_lock_request(subreq)) { + + while (!nfs_lock_request(subreq)) { /* - * releases page group bit lock and - * page locks and all references + * Unlock page to allow nfs_page_group_sync_on_bit() + * to succeed */ - ret = nfs_unroll_locks_and_wait(inode, head, - subreq); - - if (ret == 0) - goto try_again; - - return ERR_PTR(ret); + nfs_page_group_unlock(head); + ret = nfs_wait_on_request(subreq); + if (!ret) + ret = nfs_page_group_lock(head, false); + if (ret < 0) { + nfs_unroll_locks(inode, head, subreq); + nfs_unlock_and_release_request(head); + return ERR_PTR(ret); + } } /* * Subrequests are always contiguous, non overlapping @@ -549,7 +538,9 @@ try_again: ((subreq->wb_offset + subreq->wb_bytes) > (head->wb_offset + total_bytes)))) { nfs_unlock_request(subreq); - nfs_unroll_locks_and_wait(inode, head, subreq); + nfs_unroll_locks(inode, head, subreq); + nfs_page_group_unlock(head); + nfs_unlock_and_release_request(head); return ERR_PTR(-EIO); } } From 5b2b5187fa85665f0c47029ecaf49186ec138d9b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 19 Jul 2017 10:06:36 -0400 Subject: [PATCH 14/31] NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases Since nfs_page_group_destroy() does not take any locks on the requests to be freed, we need to ensure that we don't inadvertently free the request in nfs_destroy_unlinked_subrequests() while the last reference is being released elsewhere. 
Do this by: 1) Taking a reference to the request unless it is already being freed 2) Checking (under the page group lock) if PG_TEARDOWN is already set before freeing an unreferenced request in nfs_destroy_unlinked_subrequests() Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 62 +++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 1ee5d89380d9..ffb9934607ef 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -384,10 +384,11 @@ nfs_unroll_locks(struct inode *inode, struct nfs_page *head, struct nfs_page *tmp; /* relinquish all the locks successfully grabbed this run */ - for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) - nfs_unlock_request(tmp); - - WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); + for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { + if (!kref_read(&tmp->wb_kref)) + continue; + nfs_unlock_and_release_request(tmp); + } } /* @@ -414,36 +415,32 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, WARN_ON_ONCE(old_head != subreq->wb_head); /* make sure old group is not used */ - subreq->wb_head = subreq; subreq->wb_this_page = subreq; + /* Note: races with nfs_page_group_destroy() */ + if (!kref_read(&subreq->wb_kref)) { + bool freeme = test_bit(PG_TEARDOWN, &subreq->wb_flags); + + nfs_page_group_clear_bits(subreq); + /* Check if we raced with nfs_page_group_destroy() */ + if (freeme) + nfs_free_request(subreq); + continue; + } + + subreq->wb_head = subreq; + + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { + nfs_release_request(subreq); + spin_lock(&inode->i_lock); + NFS_I(inode)->nrequests--; + spin_unlock(&inode->i_lock); + } + + nfs_page_group_clear_bits(subreq); /* subreq is now totally disconnected from page group or any * write / commit lists. last chance to wake any waiters */ - nfs_unlock_request(subreq); - - if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) { - /* release ref on old head request */ - nfs_release_request(old_head); - - nfs_page_group_clear_bits(subreq); - - /* release the PG_INODE_REF reference */ - if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { - nfs_release_request(subreq); - spin_lock(&inode->i_lock); - NFS_I(inode)->nrequests--; - spin_unlock(&inode->i_lock); - } else - WARN_ON_ONCE(1); - } else { - WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags)); - /* zombie requests have already released the last - * reference and were waiting on the rest of the - * group to complete. 
Since it's no longer part of a - * group, simply free the request */ - nfs_page_group_clear_bits(subreq); - nfs_free_request(subreq); - } + nfs_unlock_and_release_request(subreq); } } @@ -512,6 +509,8 @@ try_again: for (subreq = head->wb_this_page; subreq != head; subreq = subreq->wb_this_page) { + if (!kref_get_unless_zero(&subreq->wb_kref)) + continue; while (!nfs_lock_request(subreq)) { /* * Unlock page to allow nfs_page_group_sync_on_bit() @@ -523,6 +522,7 @@ try_again: ret = nfs_page_group_lock(head, false); if (ret < 0) { nfs_unroll_locks(inode, head, subreq); + nfs_release_request(subreq); nfs_unlock_and_release_request(head); return ERR_PTR(ret); } @@ -537,8 +537,8 @@ try_again: } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || ((subreq->wb_offset + subreq->wb_bytes) > (head->wb_offset + total_bytes)))) { - nfs_unlock_request(subreq); nfs_unroll_locks(inode, head, subreq); + nfs_unlock_and_release_request(subreq); nfs_page_group_unlock(head); nfs_unlock_and_release_request(head); return ERR_PTR(-EIO); From 902a4c00462a755fe4a6ca655813c8b2a51fab4c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 19 Jul 2017 13:50:07 -0400 Subject: [PATCH 15/31] NFS: Remove nfs_page_group_clear_bits() At this point, we only expect ever to potentially see PG_REMOVE and PG_TEARDOWN being set on the subrequests. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ffb9934607ef..20d44ea328b6 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -347,22 +347,6 @@ static void nfs_end_page_writeback(struct nfs_page *req) clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); } - -/* nfs_page_group_clear_bits - * @req - an nfs request - * clears all page group related bits from @req - */ -static void -nfs_page_group_clear_bits(struct nfs_page *req) -{ - clear_bit(PG_TEARDOWN, &req->wb_flags); - clear_bit(PG_UNLOCKPAGE, &req->wb_flags); - clear_bit(PG_UPTODATE, &req->wb_flags); - clear_bit(PG_WB_END, &req->wb_flags); - clear_bit(PG_REMOVE, &req->wb_flags); -} - - /* * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req * @@ -417,13 +401,12 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, /* make sure old group is not used */ subreq->wb_this_page = subreq; + clear_bit(PG_REMOVE, &subreq->wb_flags); + /* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { - bool freeme = test_bit(PG_TEARDOWN, &subreq->wb_flags); - - nfs_page_group_clear_bits(subreq); /* Check if we raced with nfs_page_group_destroy() */ - if (freeme) + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) nfs_free_request(subreq); continue; } @@ -437,7 +420,6 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, spin_unlock(&inode->i_lock); } - nfs_page_group_clear_bits(subreq); /* subreq is now totally disconnected from page group or any * write / commit lists. 
last chance to wake any waiters */ nfs_unlock_and_release_request(subreq); @@ -573,11 +555,6 @@ try_again: spin_unlock(&inode->i_lock); } - /* - * prepare head request to be added to new pgio descriptor - */ - nfs_page_group_clear_bits(head); - nfs_page_group_unlock(head); nfs_destroy_unlinked_subrequests(destroy_list, head, inode); From dee83046e73cb7ebbbae955c1ef0f4f55a0f44f9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 10:51:02 -0400 Subject: [PATCH 16/31] NFS: Remove unuse function nfs_page_group_lock_wait() Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 21 --------------------- include/linux/nfs_page.h | 1 - 2 files changed, 22 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index a6f2bbd709ba..ced7974622dd 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -165,27 +165,6 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock) return -EAGAIN; } -/* - * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it - * @req - a request in the group - * - * This is a blocking call to wait for the group lock to be cleared. - */ -void -nfs_page_group_lock_wait(struct nfs_page *req) -{ - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - - if (!test_bit(PG_HEADLOCK, &head->wb_flags)) - return; - set_bit(PG_CONTENDED1, &head->wb_flags); - smp_mb__after_atomic(); - wait_on_bit(&head->wb_flags, PG_HEADLOCK, - TASK_UNINTERRUPTIBLE); -} - /* * nfs_page_group_unlock - unlock the head of the page group * @req - request in group that is to be unlocked diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index d67b67ae6c8b..de1d24cedaa2 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -140,7 +140,6 @@ extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page *, bool); -extern void nfs_page_group_lock_wait(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); From 1344b7ea172b4911a8ee8a6ff26c5bc6b5abb302 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 10:54:14 -0400 Subject: [PATCH 17/31] NFS: Remove unused parameter from nfs_page_group_lock() nfs_page_group_lock() is now always called with the 'nonblock' parameter set to 'false'. 
Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 31 +++++++++++-------------------- fs/nfs/write.c | 6 +++--- include/linux/nfs_page.h | 2 +- 3 files changed, 15 insertions(+), 24 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ced7974622dd..af6731dd4324 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -134,19 +134,14 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); /* * nfs_page_group_lock - lock the head of the page group * @req - request in group that is to be locked - * @nonblock - if true don't block waiting for lock * - * this lock must be held if modifying the page group list + * this lock must be held when traversing or modifying the page + * group list * - * return 0 on success, < 0 on error: -EDELAY if nonblocking or the - * result from wait_on_bit_lock - * - * NOTE: calling with nonblock=false should always have set the - * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock - * with TASK_UNINTERRUPTIBLE), so there is no need to check the result. + * return 0 on success, < 0 on error */ int -nfs_page_group_lock(struct nfs_page *req, bool nonblock) +nfs_page_group_lock(struct nfs_page *req) { struct nfs_page *head = req->wb_head; @@ -155,14 +150,10 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock) if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) return 0; - if (!nonblock) { - set_bit(PG_CONTENDED1, &head->wb_flags); - smp_mb__after_atomic(); - return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, + set_bit(PG_CONTENDED1, &head->wb_flags); + smp_mb__after_atomic(); + return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, TASK_UNINTERRUPTIBLE); - } - - return -EAGAIN; } /* @@ -225,7 +216,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit) { bool ret; - nfs_page_group_lock(req, false); + nfs_page_group_lock(req); ret = nfs_page_group_sync_on_bit_locked(req, bit); nfs_page_group_unlock(req); @@ -1016,7 +1007,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, unsigned int bytes_left = 0; unsigned int offset, pgbase; - nfs_page_group_lock(req, false); + nfs_page_group_lock(req); subreq = req; bytes_left = subreq->wb_bytes; @@ -1038,7 +1029,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, if (mirror->pg_recoalesce) return 0; /* retry add_request for this subreq */ - nfs_page_group_lock(req, false); + nfs_page_group_lock(req); continue; } @@ -1135,7 +1126,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, for (midx = 0; midx < desc->pg_mirror_count; midx++) { if (midx) { - nfs_page_group_lock(req, false); + nfs_page_group_lock(req); /* find the last request */ for (lastreq = req->wb_head; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 20d44ea328b6..0f418d825185 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -271,7 +271,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req) unsigned int pos = 0; unsigned int len = nfs_page_length(req->wb_page); - nfs_page_group_lock(req, false); + nfs_page_group_lock(req); do { tmp = nfs_page_group_search_locked(req->wb_head, pos); @@ -480,7 +480,7 @@ try_again: } spin_unlock(&inode->i_lock); - ret = nfs_page_group_lock(head, false); + ret = nfs_page_group_lock(head); if (ret < 0) { nfs_unlock_and_release_request(head); return ERR_PTR(ret); @@ -501,7 +501,7 @@ try_again: nfs_page_group_unlock(head); ret = nfs_wait_on_request(subreq); if (!ret) - ret = nfs_page_group_lock(head, false); + ret = nfs_page_group_lock(head); if (ret < 0) { nfs_unroll_locks(inode, head, subreq); 
nfs_release_request(subreq); diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index de1d24cedaa2..2f4fdafb6746 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -139,7 +139,7 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); -extern int nfs_page_group_lock(struct nfs_page *, bool); +extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); From 7e8a30f8b497315a6467d86c82f6cc8acaa9db61 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 17 Jul 2017 16:58:07 -0400 Subject: [PATCH 18/31] NFS: Fix up nfs_page_group_covers_page() Fix up the test in nfs_page_group_covers_page(). The simplest implementation is to check that we have a set of intersecting or contiguous subrequests that connect page offset 0 to nfs_page_length(req->wb_page). Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 0f418d825185..759e37d26acf 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -243,9 +243,6 @@ nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) { struct nfs_page *req; - WARN_ON_ONCE(head != head->wb_head); - WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags)); - req = head; do { if (page_offset >= req->wb_pgbase && @@ -273,18 +270,15 @@ static bool nfs_page_group_covers_page(struct nfs_page *req) nfs_page_group_lock(req); - do { + for (;;) { tmp = nfs_page_group_search_locked(req->wb_head, pos); - if (tmp) { - /* no way this should happen */ - WARN_ON_ONCE(tmp->wb_pgbase != pos); - pos += tmp->wb_bytes - (pos - tmp->wb_pgbase); - } - } while (tmp && pos < len); + if (!tmp) + break; + pos = tmp->wb_pgbase + tmp->wb_bytes; + } nfs_page_group_unlock(req); - WARN_ON_ONCE(pos > len); - return pos == len; + return pos >= len; } /* We can set the PG_uptodate flag if we see that a write request From bd37d6fce184836bd5e7cd90ce40116a4fadaf2a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 12:06:30 -0400 Subject: [PATCH 19/31] NFSv4: Convert nfs_lock_and_join_requests() to use nfs_page_find_head_request() Hide the locking from nfs_lock_and_join_requests() so that we can separate out the requirements for swapcache pages. 
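What this buys is the usual optimistic lookup-and-revalidate pattern: take the reference and the request lock without the inode spin lock, then confirm the page still points at the same head request before going any further. Roughly, using only helpers from the diff below:

	head = nfs_page_find_head_request(page);
	if (!head)
		return NULL;

	if (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
		goto try_again;
	}

	/* revalidate: was the request detached while we waited? */
	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
		nfs_unlock_and_release_request(head);
		goto try_again;
	}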
Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 759e37d26acf..a06167e20b72 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -154,6 +154,14 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); } +static struct nfs_page * +nfs_page_private_request(struct page *page) +{ + if (!PagePrivate(page)) + return NULL; + return (struct nfs_page *)page_private(page); +} + /* * nfs_page_find_head_request_locked - find head request associated with @page * @@ -164,11 +172,10 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) static struct nfs_page * nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) { - struct nfs_page *req = NULL; + struct nfs_page *req; - if (PagePrivate(page)) - req = (struct nfs_page *)page_private(page); - else if (unlikely(PageSwapCache(page))) + req = nfs_page_private_request(page); + if (!req && unlikely(PageSwapCache(page))) req = nfs_page_search_commits_for_head_request_locked(nfsi, page); @@ -448,31 +455,29 @@ nfs_lock_and_join_requests(struct page *page) int ret; try_again: - if (!(PagePrivate(page) || PageSwapCache(page))) - return NULL; - spin_lock(&inode->i_lock); /* * A reference is taken only on the head request which acts as a * reference to the whole page group - the group will not be destroyed * until the head reference is released. */ - head = nfs_page_find_head_request_locked(NFS_I(inode), page); - - if (!head) { - spin_unlock(&inode->i_lock); + head = nfs_page_find_head_request(page); + if (!head) return NULL; - } /* lock the page head first in order to avoid an ABBA inefficiency */ if (!nfs_lock_request(head)) { - spin_unlock(&inode->i_lock); ret = nfs_wait_on_request(head); nfs_release_request(head); if (ret < 0) return ERR_PTR(ret); goto try_again; } - spin_unlock(&inode->i_lock); + + /* Ensure that nobody removed the request before we locked it */ + if (head != nfs_page_private_request(page) && !PageSwapCache(page)) { + nfs_unlock_and_release_request(head); + goto try_again; + } ret = nfs_page_group_lock(head); if (ret < 0) { @@ -559,7 +564,7 @@ try_again: return NULL; } - /* still holds ref on head from nfs_page_find_head_request_locked + /* still holds ref on head from nfs_page_find_head_request * and still has lock on head from lock loop */ return head; } From b30d2f04c35d539bf8003b3e014c389abefc249b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 12:26:53 -0400 Subject: [PATCH 20/31] NFS: Refactor nfs_page_find_head_request() Split out the 2 cases so that we can treat the locking differently. The issue is that the locking in the pageswapcache cache is highly linked to the commit list locking. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index a06167e20b72..8d8fa6d4cfcc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -170,20 +170,41 @@ nfs_page_private_request(struct page *page) * returns matching head request with reference held, or NULL if not found. 
*/ static struct nfs_page * -nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) +nfs_page_find_private_request(struct page *page) { + struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req; + if (!PagePrivate(page)) + return NULL; + spin_lock(&inode->i_lock); req = nfs_page_private_request(page); - if (!req && unlikely(PageSwapCache(page))) - req = nfs_page_search_commits_for_head_request_locked(nfsi, - page); - if (req) { WARN_ON_ONCE(req->wb_head != req); kref_get(&req->wb_kref); } + spin_unlock(&inode->i_lock); + return req; +} +static struct nfs_page * +nfs_page_find_swap_request(struct page *page) +{ + struct inode *inode = page_file_mapping(page)->host; + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_page *req = NULL; + if (!PageSwapCache(page)) + return NULL; + spin_lock(&inode->i_lock); + if (PageSwapCache(page)) { + req = nfs_page_search_commits_for_head_request_locked(nfsi, + page); + if (req) { + WARN_ON_ONCE(req->wb_head != req); + kref_get(&req->wb_kref); + } + } + spin_unlock(&inode->i_lock); return req; } @@ -194,14 +215,11 @@ nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) */ static struct nfs_page *nfs_page_find_head_request(struct page *page) { - struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *req = NULL; + struct nfs_page *req; - if (PagePrivate(page) || PageSwapCache(page)) { - spin_lock(&inode->i_lock); - req = nfs_page_find_head_request_locked(NFS_I(inode), page); - spin_unlock(&inode->i_lock); - } + req = nfs_page_find_private_request(page); + if (!req) + req = nfs_page_find_swap_request(page); return req; } From e824f99adaaf1ed0e03eac8574599af6d992163d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 11:53:49 -0400 Subject: [PATCH 21/31] NFSv4: Use a mutex to protect the per-inode commit lists The commit lists can get very large, so using the inode->i_lock can end up affecting general metadata performance. 
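The conversion itself is mechanical: every site that took cinfo->inode->i_lock purely to guard the commit lists now takes the per-inode mutex instead, and because a mutex allows sleeping, cond_resched() can replace cond_resched_lock() in the list walkers. A representative before/after, matching the pattern in the diff below:

	/* before */
	spin_lock(&cinfo->inode->i_lock);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	spin_unlock(&cinfo->inode->i_lock);

	/* after */
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);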
Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 4 ++-- fs/nfs/inode.c | 1 + fs/nfs/pnfs_nfs.c | 15 +++++++-------- fs/nfs/write.c | 24 ++++++++++++------------ include/linux/nfs_fs.h | 1 + 5 files changed, 23 insertions(+), 22 deletions(-) diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 6fb9fad2d1e6..d2972d537469 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -616,13 +616,13 @@ nfs_direct_write_scan_commit_list(struct inode *inode, struct list_head *list, struct nfs_commit_info *cinfo) { - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); #ifdef CONFIG_NFS_V4_1 if (cinfo->ds != NULL && cinfo->ds->nwritten != 0) NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); #endif nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0); - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); } static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 109279d6d91b..34d9ebbc0dfd 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -2016,6 +2016,7 @@ static void init_once(void *foo) nfsi->commit_info.ncommit = 0; atomic_set(&nfsi->commit_info.rpcs_out, 0); init_rwsem(&nfsi->rmdir_sem); + mutex_init(&nfsi->commit_mutex); nfs4_init_once(nfsi); } diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 25f28fa64c57..2cdee8ce2094 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -98,14 +98,13 @@ pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst, if (!nfs_lock_request(req)) continue; kref_get(&req->wb_kref); - if (cond_resched_lock(&cinfo->inode->i_lock)) - list_safe_reset_next(req, tmp, wb_list); nfs_request_remove_commit_list(req, cinfo); clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); nfs_list_add_request(req, dst); ret++; if ((ret == max) && !cinfo->dreq) break; + cond_resched(); } return ret; } @@ -119,7 +118,7 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, struct list_head *dst = &bucket->committing; int ret; - lockdep_assert_held(&cinfo->inode->i_lock); + lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex); ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max); if (ret) { cinfo->ds->nwritten -= ret; @@ -142,7 +141,7 @@ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, { int i, rv = 0, cnt; - lockdep_assert_held(&cinfo->inode->i_lock); + lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex); for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) { cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i], cinfo, max); @@ -162,7 +161,7 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst, int nwritten; int i; - lockdep_assert_held(&cinfo->inode->i_lock); + lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex); restart: for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { nwritten = pnfs_generic_transfer_commit_list(&b->written, @@ -953,12 +952,12 @@ pnfs_layout_mark_request_commit(struct nfs_page *req, struct list_head *list; struct pnfs_commit_bucket *buckets; - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); buckets = cinfo->ds->buckets; list = &buckets[ds_commit_idx].written; if (list_empty(list)) { if (!pnfs_is_valid_lseg(lseg)) { - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); cinfo->completion_ops->resched_write(cinfo, req); return; } @@ -975,7 +974,7 @@ pnfs_layout_mark_request_commit(struct nfs_page *req, cinfo->ds->nwritten++; 
nfs_request_add_commit_list_locked(req, list, cinfo); - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); nfs_mark_page_unstable(req->wb_page, cinfo); } EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 8d8fa6d4cfcc..5ab5ca24b48a 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -195,7 +195,7 @@ nfs_page_find_swap_request(struct page *page) struct nfs_page *req = NULL; if (!PageSwapCache(page)) return NULL; - spin_lock(&inode->i_lock); + mutex_lock(&nfsi->commit_mutex); if (PageSwapCache(page)) { req = nfs_page_search_commits_for_head_request_locked(nfsi, page); @@ -204,7 +204,7 @@ nfs_page_find_swap_request(struct page *page) kref_get(&req->wb_kref); } } - spin_unlock(&inode->i_lock); + mutex_unlock(&nfsi->commit_mutex); return req; } @@ -856,7 +856,8 @@ nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, * number of outstanding requests requiring a commit as well as * the MM page stats. * - * The caller must hold cinfo->inode->i_lock, and the nfs_page lock. + * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the + * nfs_page lock. */ void nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, @@ -884,9 +885,9 @@ EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); void nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo) { - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo); - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); if (req->wb_page) nfs_mark_page_unstable(req->wb_page, cinfo); } @@ -964,11 +965,11 @@ nfs_clear_request_commit(struct nfs_page *req) struct nfs_commit_info cinfo; nfs_init_cinfo_from_inode(&cinfo, inode); - spin_lock(&inode->i_lock); + mutex_lock(&NFS_I(inode)->commit_mutex); if (!pnfs_clear_request_commit(req, &cinfo)) { nfs_request_remove_commit_list(req, &cinfo); } - spin_unlock(&inode->i_lock); + mutex_unlock(&NFS_I(inode)->commit_mutex); nfs_clear_page_commit(req->wb_page); } } @@ -1027,7 +1028,7 @@ nfs_reqs_to_commit(struct nfs_commit_info *cinfo) return cinfo->mds->ncommit; } -/* cinfo->inode->i_lock held by caller */ +/* NFS_I(cinfo->inode)->commit_mutex held by caller */ int nfs_scan_commit_list(struct list_head *src, struct list_head *dst, struct nfs_commit_info *cinfo, int max) @@ -1039,13 +1040,12 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, if (!nfs_lock_request(req)) continue; kref_get(&req->wb_kref); - if (cond_resched_lock(&cinfo->inode->i_lock)) - list_safe_reset_next(req, tmp, wb_list); nfs_request_remove_commit_list(req, cinfo); nfs_list_add_request(req, dst); ret++; if ((ret == max) && !cinfo->dreq) break; + cond_resched(); } return ret; } @@ -1065,7 +1065,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, { int ret = 0; - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); if (cinfo->mds->ncommit > 0) { const int max = INT_MAX; @@ -1073,7 +1073,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, cinfo, max); ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); } - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); return ret; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 5cc91d6381a3..121a702888b4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -163,6 +163,7 @@ struct 
nfs_inode { /* Readers: in-flight sillydelete RPC calls */ /* Writers: rmdir */ struct rw_semaphore rmdir_sem; + struct mutex commit_mutex; #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; From a6b6d5b85abf4914bbceade5dddd54c345c64136 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 15:39:46 -0400 Subject: [PATCH 22/31] NFS: Use an atomic_long_t to count the number of requests Rather than forcing us to take the inode->i_lock just in order to bump the number. Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 2 +- fs/nfs/delegation.c | 2 +- fs/nfs/inode.c | 7 +++---- fs/nfs/pagelist.c | 4 +--- fs/nfs/write.c | 18 +++++------------- include/linux/nfs_fs.h | 4 ++-- 6 files changed, 13 insertions(+), 24 deletions(-) diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 5427cdf04c5a..14358de173fb 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -51,7 +51,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp, goto out_iput; res->size = i_size_read(inode); res->change_attr = delegation->change_attr; - if (nfsi->nrequests != 0) + if (nfs_have_writebacks(inode)) res->change_attr++; res->ctime = inode->i_ctime; res->mtime = inode->i_mtime; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index d7df5e67b0c1..606dd3871f66 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -1089,7 +1089,7 @@ bool nfs4_delegation_flush_on_close(const struct inode *inode) delegation = rcu_dereference(nfsi->delegation); if (delegation == NULL || !(delegation->type & FMODE_WRITE)) goto out; - if (nfsi->nrequests < delegation->pagemod_limit) + if (atomic_long_read(&nfsi->nrequests) < delegation->pagemod_limit) ret = false; out: rcu_read_unlock(); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 34d9ebbc0dfd..0480eb02299a 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1285,7 +1285,6 @@ static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi) static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) { - struct nfs_inode *nfsi = NFS_I(inode); unsigned long ret = 0; if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE) @@ -1315,7 +1314,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE) && (fattr->valid & NFS_ATTR_FATTR_SIZE) && i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size) - && nfsi->nrequests == 0) { + && !nfs_have_writebacks(inode)) { i_size_write(inode, nfs_size_to_loff_t(fattr->size)); ret |= NFS_INO_INVALID_ATTR; } @@ -1823,7 +1822,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (new_isize != cur_isize) { /* Do we perhaps have any outstanding writes, or has * the file grown beyond our last write? 
*/ - if (nfsi->nrequests == 0 || new_isize > cur_isize) { + if (!nfs_have_writebacks(inode) || new_isize > cur_isize) { i_size_write(inode, new_isize); if (!have_writers) invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; @@ -2012,7 +2011,7 @@ static void init_once(void *foo) INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); INIT_LIST_HEAD(&nfsi->commit_info.list); - nfsi->nrequests = 0; + atomic_long_set(&nfsi->nrequests, 0); nfsi->commit_info.ncommit = 0; atomic_set(&nfsi->commit_info.rpcs_out, 0); init_rwsem(&nfsi->rmdir_sem); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index af6731dd4324..ec97c301899b 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -258,9 +258,7 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev) inode = page_file_mapping(req->wb_page)->host; set_bit(PG_INODE_REF, &req->wb_flags); kref_get(&req->wb_kref); - spin_lock(&inode->i_lock); - NFS_I(inode)->nrequests++; - spin_unlock(&inode->i_lock); + atomic_long_inc(&NFS_I(inode)->nrequests); } } } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5ab5ca24b48a..08093552f115 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -434,9 +434,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { nfs_release_request(subreq); - spin_lock(&inode->i_lock); - NFS_I(inode)->nrequests--; - spin_unlock(&inode->i_lock); + atomic_long_dec(&NFS_I(inode)->nrequests); } /* subreq is now totally disconnected from page group or any @@ -567,9 +565,7 @@ try_again: if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { set_bit(PG_INODE_REF, &head->wb_flags); kref_get(&head->wb_kref); - spin_lock(&inode->i_lock); - NFS_I(inode)->nrequests++; - spin_unlock(&inode->i_lock); + atomic_long_inc(&NFS_I(inode)->nrequests); } nfs_page_group_unlock(head); @@ -755,7 +751,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) nfs_lock_request(req); spin_lock(&inode->i_lock); - if (!nfsi->nrequests && + if (!nfs_have_writebacks(inode) && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) inode->i_version++; /* @@ -767,7 +763,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) SetPagePrivate(req->wb_page); set_page_private(req->wb_page, (unsigned long)req); } - nfsi->nrequests++; + atomic_long_inc(&nfsi->nrequests); /* this a head request for a page group - mark it as having an * extra reference so sub groups can follow suit. 
* This flag also informs pgio layer when to bump nrequests when @@ -786,6 +782,7 @@ static void nfs_inode_remove_request(struct nfs_page *req) struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *head; + atomic_long_dec(&nfsi->nrequests); if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { head = req->wb_head; @@ -795,11 +792,6 @@ static void nfs_inode_remove_request(struct nfs_page *req) ClearPagePrivate(head->wb_page); clear_bit(PG_MAPPED, &head->wb_flags); } - nfsi->nrequests--; - spin_unlock(&inode->i_lock); - } else { - spin_lock(&inode->i_lock); - nfsi->nrequests--; spin_unlock(&inode->i_lock); } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 121a702888b4..238fdc4c46df 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -154,7 +154,7 @@ struct nfs_inode { */ __be32 cookieverf[2]; - unsigned long nrequests; + atomic_long_t nrequests; struct nfs_mds_commit_info commit_info; /* Open contexts for shared mmap writes */ @@ -511,7 +511,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data); static inline int nfs_have_writebacks(struct inode *inode) { - return NFS_I(inode)->nrequests != 0; + return atomic_long_read(&NFS_I(inode)->nrequests) != 0; } /* From 5cb953d4b1e70a09084f71594c45d47458346bc2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 17:04:12 -0400 Subject: [PATCH 23/31] NFS: Use an atomic_long_t to count the number of commits Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 2 +- fs/nfs/write.c | 12 +++++++----- include/linux/nfs_xdr.h | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0480eb02299a..134d9f560240 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -2012,7 +2012,7 @@ static void init_once(void *foo) INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); INIT_LIST_HEAD(&nfsi->commit_info.list); atomic_long_set(&nfsi->nrequests, 0); - nfsi->commit_info.ncommit = 0; + atomic_long_set(&nfsi->commit_info.ncommit, 0); atomic_set(&nfsi->commit_info.rpcs_out, 0); init_rwsem(&nfsi->rmdir_sem); mutex_init(&nfsi->commit_mutex); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 08093552f115..12479c25028e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -857,7 +857,7 @@ nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, { set_bit(PG_CLEAN, &req->wb_flags); nfs_list_add_request(req, dst); - cinfo->mds->ncommit++; + atomic_long_inc(&cinfo->mds->ncommit); } EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); @@ -903,7 +903,7 @@ nfs_request_remove_commit_list(struct nfs_page *req, if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) return; nfs_list_remove_request(req); - cinfo->mds->ncommit--; + atomic_long_dec(&cinfo->mds->ncommit); } EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); @@ -1017,7 +1017,7 @@ out: unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo) { - return cinfo->mds->ncommit; + return atomic_long_read(&cinfo->mds->ncommit); } /* NFS_I(cinfo->inode)->commit_mutex held by caller */ @@ -1057,8 +1057,10 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, { int ret = 0; + if (!atomic_long_read(&cinfo->mds->ncommit)) + return 0; mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); - if (cinfo->mds->ncommit > 0) { + if (atomic_long_read(&cinfo->mds->ncommit) > 0) { const int max = INT_MAX; ret = nfs_scan_commit_list(&cinfo->mds->list, dst, @@ -1890,7 +1892,7 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) int ret = 0; /* no commits means nothing needs to be done */ - if 
(!nfsi->commit_info.ncommit) + if (!atomic_long_read(&nfsi->commit_info.ncommit)) return ret; if (wbc->sync_mode == WB_SYNC_NONE) { diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 62cbcb842f99..164d5359d4ab 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1476,7 +1476,7 @@ struct nfs_pgio_header { struct nfs_mds_commit_info { atomic_t rpcs_out; - unsigned long ncommit; + atomic_long_t ncommit; struct list_head list; }; From 4b9bb25b36baa3e2e42b91e451bcd3acfe197a1d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 12:34:44 -0400 Subject: [PATCH 24/31] NFS: Switch to using mapping->private_lock for page writeback lookups. Switch from using the inode->i_lock for this to avoid contention with other metadata manipulation. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 12479c25028e..866702823869 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -172,18 +172,18 @@ nfs_page_private_request(struct page *page) static struct nfs_page * nfs_page_find_private_request(struct page *page) { - struct inode *inode = page_file_mapping(page)->host; + struct address_space *mapping = page_file_mapping(page); struct nfs_page *req; if (!PagePrivate(page)) return NULL; - spin_lock(&inode->i_lock); + spin_lock(&mapping->private_lock); req = nfs_page_private_request(page); if (req) { WARN_ON_ONCE(req->wb_head != req); kref_get(&req->wb_kref); } - spin_unlock(&inode->i_lock); + spin_unlock(&mapping->private_lock); return req; } @@ -743,6 +743,7 @@ out_err: */ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) { + struct address_space *mapping = page_file_mapping(req->wb_page); struct nfs_inode *nfsi = NFS_I(inode); WARN_ON_ONCE(req->wb_this_page != req); @@ -750,19 +751,23 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) /* Lock the request! */ nfs_lock_request(req); - spin_lock(&inode->i_lock); - if (!nfs_have_writebacks(inode) && - NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) - inode->i_version++; /* * Swap-space should not get truncated. Hence no need to plug the race * with invalidate/truncate. */ + spin_lock(&mapping->private_lock); + if (!nfs_have_writebacks(inode) && + NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) { + spin_lock(&inode->i_lock); + inode->i_version++; + spin_unlock(&inode->i_lock); + } if (likely(!PageSwapCache(req->wb_page))) { set_bit(PG_MAPPED, &req->wb_flags); SetPagePrivate(req->wb_page); set_page_private(req->wb_page, (unsigned long)req); } + spin_unlock(&mapping->private_lock); atomic_long_inc(&nfsi->nrequests); /* this a head request for a page group - mark it as having an * extra reference so sub groups can follow suit. @@ -770,7 +775,6 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) * adding subrequests. 
*/ WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); kref_get(&req->wb_kref); - spin_unlock(&inode->i_lock); } /* @@ -778,7 +782,8 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) */ static void nfs_inode_remove_request(struct nfs_page *req) { - struct inode *inode = d_inode(req->wb_context->dentry); + struct address_space *mapping = page_file_mapping(req->wb_page); + struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *head; @@ -786,13 +791,13 @@ static void nfs_inode_remove_request(struct nfs_page *req) if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { head = req->wb_head; - spin_lock(&inode->i_lock); + spin_lock(&mapping->private_lock); if (likely(head->wb_page && !PageSwapCache(head->wb_page))) { set_page_private(head->wb_page, 0); ClearPagePrivate(head->wb_page); clear_bit(PG_MAPPED, &head->wb_flags); } - spin_unlock(&inode->i_lock); + spin_unlock(&mapping->private_lock); } if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) From 8205b9ce030288e104a3024344f2a0a086231e36 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 17:07:02 -0400 Subject: [PATCH 25/31] NFSv4/pnfs: Replace pnfs_put_lseg_locked() with pnfs_put_lseg() Now that we no longer hold the inode->i_lock when manipulating the commit lists, it is safe to call pnfs_put_lseg() again. Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 41 ----------------------------------------- fs/nfs/pnfs.h | 2 -- fs/nfs/pnfs_nfs.c | 4 ++-- 3 files changed, 2 insertions(+), 45 deletions(-) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c383d0913b54..3125a9d7b237 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -529,47 +529,6 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) } EXPORT_SYMBOL_GPL(pnfs_put_lseg); -static void pnfs_free_lseg_async_work(struct work_struct *work) -{ - struct pnfs_layout_segment *lseg; - struct pnfs_layout_hdr *lo; - - lseg = container_of(work, struct pnfs_layout_segment, pls_work); - lo = lseg->pls_layout; - - pnfs_free_lseg(lseg); - pnfs_put_layout_hdr(lo); -} - -static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg) -{ - INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work); - schedule_work(&lseg->pls_work); -} - -void -pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg) -{ - if (!lseg) - return; - - assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock); - - dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, - atomic_read(&lseg->pls_refcount), - test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); - if (atomic_dec_and_test(&lseg->pls_refcount)) { - struct pnfs_layout_hdr *lo = lseg->pls_layout; - if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) - return; - pnfs_layout_remove_lseg(lo, lseg); - if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) { - pnfs_get_layout_hdr(lo); - pnfs_free_lseg_async(lseg); - } - } -} - /* * is l2 fully contained in l1? 
* start1 end1 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 99731e3e332f..87f144f14d1e 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -67,7 +67,6 @@ struct pnfs_layout_segment { u32 pls_seq; unsigned long pls_flags; struct pnfs_layout_hdr *pls_layout; - struct work_struct pls_work; }; enum pnfs_try_status { @@ -230,7 +229,6 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync); /* pnfs.c */ void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo); void pnfs_put_lseg(struct pnfs_layout_segment *lseg); -void pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg); void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *); void unset_pnfs_layoutdriver(struct nfs_server *); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 2cdee8ce2094..4b0a809653d1 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -83,7 +83,7 @@ pnfs_generic_clear_request_commit(struct nfs_page *req, } out: nfs_request_remove_commit_list(req, cinfo); - pnfs_put_lseg_locked(freeme); + pnfs_put_lseg(freeme); } EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit); @@ -126,7 +126,7 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, if (bucket->clseg == NULL) bucket->clseg = pnfs_get_lseg(bucket->wlseg); if (list_empty(src)) { - pnfs_put_lseg_locked(bucket->wlseg); + pnfs_put_lseg(bucket->wlseg); bucket->wlseg = NULL; } } From 2ce209c42c01ca976ad680fea52a8e8b9a53643b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 1 Aug 2017 17:29:29 -0400 Subject: [PATCH 26/31] NFS: Wait for requests that are locked on the commit list If a request is on the commit list, but is locked, we will currently skip it, which can lead to livelocking when the commit count doesn't reduce to zero. Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 2 ++ fs/nfs/pnfs_nfs.c | 18 ++++++++++++++---- fs/nfs/write.c | 17 +++++++++++++---- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ec97c301899b..548ebc7256ff 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -434,6 +434,7 @@ void nfs_release_request(struct nfs_page *req) { kref_put(&req->wb_kref, nfs_page_group_destroy); } +EXPORT_SYMBOL_GPL(nfs_release_request); /** * nfs_wait_on_request - Wait for a request to complete. 
@@ -452,6 +453,7 @@ nfs_wait_on_request(struct nfs_page *req) return wait_on_bit_io(&req->wb_flags, PG_BUSY, TASK_UNINTERRUPTIBLE); } +EXPORT_SYMBOL_GPL(nfs_wait_on_request); /* * nfs_generic_pg_test - determine if requests can be coalesced diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 4b0a809653d1..303ff171cb5d 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -91,13 +91,23 @@ static int pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst, struct nfs_commit_info *cinfo, int max) { - struct nfs_page *req, *tmp; + struct nfs_page *req; int ret = 0; - list_for_each_entry_safe(req, tmp, src, wb_list) { - if (!nfs_lock_request(req)) - continue; + while(!list_empty(src)) { + req = list_first_entry(src, struct nfs_page, wb_list); + kref_get(&req->wb_kref); + if (!nfs_lock_request(req)) { + int status; + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); + status = nfs_wait_on_request(req); + nfs_release_request(req); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); + if (status < 0) + break; + continue; + } nfs_request_remove_commit_list(req, cinfo); clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); nfs_list_add_request(req, dst); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 866702823869..5dd3b212376e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1030,13 +1030,22 @@ int nfs_scan_commit_list(struct list_head *src, struct list_head *dst, struct nfs_commit_info *cinfo, int max) { - struct nfs_page *req, *tmp; + struct nfs_page *req; int ret = 0; - list_for_each_entry_safe(req, tmp, src, wb_list) { - if (!nfs_lock_request(req)) - continue; + while(!list_empty(src)) { + req = list_first_entry(src, struct nfs_page, wb_list); kref_get(&req->wb_kref); + if (!nfs_lock_request(req)) { + int status; + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); + status = nfs_wait_on_request(req); + nfs_release_request(req); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); + if (status < 0) + break; + continue; + } nfs_request_remove_commit_list(req, cinfo); nfs_list_add_request(req, dst); ret++; From 729749bb8da186e68d97d1b0439f0b1e0059c41d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 13 Aug 2017 10:03:59 -0400 Subject: [PATCH 27/31] SUNRPC: Don't hold the transport lock across socket copy operations Instead add a mechanism to ensure that the request doesn't disappear from underneath us while copying from the socket. We do this by preventing xprt_release() from freeing the XDR buffers until the flag RPC_TASK_MSG_RECV has been cleared from the request. 
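In outline, each receive path below is converted from "hold the transport lock across the copy" into a lookup-pin-copy-complete sequence. The following is a minimal sketch of that resulting shape, using only the helpers this patch adds; the function name and the elided copy step are illustrative and not part of the patch:

static void example_data_read_skb(struct rpc_xprt *xprt, struct sk_buff *skb, __be32 xid)
{
	struct rpc_rqst *req;
	int copied;

	/* Lookup and pin must be atomic, so both happen under the lock */
	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req) {
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}
	xprt_pin_rqst(req);
	spin_unlock_bh(&xprt->transport_lock);

	/* Copy from the socket with no spinlock held; the pin keeps
	 * xprt_release() from freeing the XDR buffers underneath us. */
	copied = req->rq_private_buf.buflen;
	/* ... copy/checksum of skb data into req->rq_private_buf goes here ... */

	spin_lock_bh(&xprt->transport_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);	/* wakes xprt_release() if it is waiting on the pin */
	spin_unlock_bh(&xprt->transport_lock);
}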
Signed-off-by: Trond Myklebust Reviewed-by: Chuck Lever --- include/linux/sunrpc/sched.h | 2 ++ include/linux/sunrpc/xprt.h | 2 ++ net/sunrpc/xprt.c | 43 ++++++++++++++++++++++++++++++++++++ net/sunrpc/xprtsock.c | 23 ++++++++++++++----- 4 files changed, 65 insertions(+), 5 deletions(-) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 50a99a117da7..c1768f9d993b 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -139,6 +139,8 @@ struct rpc_task_setup { #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 #define RPC_TASK_ACTIVE 2 +#define RPC_TASK_MSG_RECV 3 +#define RPC_TASK_MSG_RECV_WAIT 4 #define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index eab1c749e192..65b9e0224753 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -372,6 +372,8 @@ void xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); void xprt_complete_rqst(struct rpc_task *task, int copied); +void xprt_pin_rqst(struct rpc_rqst *req); +void xprt_unpin_rqst(struct rpc_rqst *req); void xprt_release_rqst_cong(struct rpc_task *task); void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 4654a9934269..3eb9ec16eec4 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -844,6 +844,48 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) } EXPORT_SYMBOL_GPL(xprt_lookup_rqst); +/** + * xprt_pin_rqst - Pin a request on the transport receive list + * @req: Request to pin + * + * Caller must ensure this is atomic with the call to xprt_lookup_rqst() + * so should be holding the xprt transport lock. + */ +void xprt_pin_rqst(struct rpc_rqst *req) +{ + set_bit(RPC_TASK_MSG_RECV, &req->rq_task->tk_runstate); +} + +/** + * xprt_unpin_rqst - Unpin a request on the transport receive list + * @req: Request to pin + * + * Caller should be holding the xprt transport lock. 
+ */ +void xprt_unpin_rqst(struct rpc_rqst *req) +{ + struct rpc_task *task = req->rq_task; + + clear_bit(RPC_TASK_MSG_RECV, &task->tk_runstate); + if (test_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate)) + wake_up_bit(&task->tk_runstate, RPC_TASK_MSG_RECV); +} + +static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req) +__must_hold(&req->rq_xprt->transport_lock) +{ + struct rpc_task *task = req->rq_task; + + if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) { + spin_unlock_bh(&req->rq_xprt->transport_lock); + set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate); + wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV, + TASK_UNINTERRUPTIBLE); + clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate); + spin_lock_bh(&req->rq_xprt->transport_lock); + } +} + static void xprt_update_rtt(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; @@ -1295,6 +1337,7 @@ void xprt_release(struct rpc_task *task) list_del(&req->rq_list); xprt->last_used = jiffies; xprt_schedule_autodisconnect(xprt); + xprt_wait_on_pinned_rqst(req); spin_unlock_bh(&xprt->transport_lock); if (req->rq_buffer) xprt->ops->buf_free(task); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 4f154d388748..04dbc7027712 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -973,6 +973,8 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt, rovr = xprt_lookup_rqst(xprt, *xp); if (!rovr) goto out_unlock; + xprt_pin_rqst(rovr); + spin_unlock_bh(&xprt->transport_lock); task = rovr->rq_task; copied = rovr->rq_private_buf.buflen; @@ -981,11 +983,14 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt, if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) { dprintk("RPC: sk_buff copy failed\n"); - goto out_unlock; + spin_lock_bh(&xprt->transport_lock); + goto out_unpin; } + spin_lock_bh(&xprt->transport_lock); xprt_complete_rqst(task, copied); - +out_unpin: + xprt_unpin_rqst(rovr); out_unlock: spin_unlock_bh(&xprt->transport_lock); } @@ -1054,6 +1059,8 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, rovr = xprt_lookup_rqst(xprt, *xp); if (!rovr) goto out_unlock; + xprt_pin_rqst(rovr); + spin_unlock_bh(&xprt->transport_lock); task = rovr->rq_task; if ((copied = rovr->rq_private_buf.buflen) > repsize) @@ -1062,14 +1069,17 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, /* Suck it into the iovec, verify checksum if not done by hw. 
*/ if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { __UDPX_INC_STATS(sk, UDP_MIB_INERRORS); - goto out_unlock; + spin_lock_bh(&xprt->transport_lock); + goto out_unpin; } __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); + spin_lock_bh(&xprt->transport_lock); xprt_adjust_cwnd(xprt, task, copied); xprt_complete_rqst(task, copied); - +out_unpin: + xprt_unpin_rqst(rovr); out_unlock: spin_unlock_bh(&xprt->transport_lock); } @@ -1351,12 +1361,15 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, spin_unlock_bh(&xprt->transport_lock); return -1; } + xprt_pin_rqst(req); + spin_unlock_bh(&xprt->transport_lock); xs_tcp_read_common(xprt, desc, req); + spin_lock_bh(&xprt->transport_lock); if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) xprt_complete_rqst(req->rq_task, transport->tcp_copied); - + xprt_unpin_rqst(req); spin_unlock_bh(&xprt->transport_lock); return 0; } From c89091c88db768e3d28992023ca2cff6636c402b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 16 Aug 2017 12:09:44 -0400 Subject: [PATCH 28/31] SUNRPC: Don't hold the transport lock when receiving backchannel data The backchannel request has no associated task, so it is going nowhere until we call xprt_complete_bc_request(). Signed-off-by: Trond Myklebust --- net/sunrpc/backchannel_rqst.c | 4 ++-- net/sunrpc/xprtsock.c | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index ac701c28f44f..c2c68a15b59d 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -171,10 +171,10 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs) /* * Add the temporary list to the backchannel preallocation list */ - spin_lock_bh(&xprt->bc_pa_lock); + spin_lock(&xprt->bc_pa_lock); list_splice(&tmp_list, &xprt->bc_pa_list); xprt_inc_alloc_count(xprt, min_reqs); - spin_unlock_bh(&xprt->bc_pa_lock); + spin_unlock(&xprt->bc_pa_lock); dprintk("RPC: setup backchannel transport done\n"); return 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 04dbc7027712..a2312f14beb4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1389,11 +1389,9 @@ static int xs_tcp_read_callback(struct rpc_xprt *xprt, container_of(xprt, struct sock_xprt, xprt); struct rpc_rqst *req; - /* Look up and lock the request corresponding to the given XID */ - spin_lock_bh(&xprt->transport_lock); + /* Look up the request corresponding to the given XID */ req = xprt_lookup_bc_request(xprt, transport->tcp_xid); if (req == NULL) { - spin_unlock_bh(&xprt->transport_lock); printk(KERN_WARNING "Callback slot table overflowed\n"); xprt_force_disconnect(xprt); return -1; @@ -1404,7 +1402,6 @@ static int xs_tcp_read_callback(struct rpc_xprt *xprt, if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) xprt_complete_bc_request(req, transport->tcp_copied); - spin_unlock_bh(&xprt->transport_lock); return 0; } From 8d6f97d698b2b9e71124ba95688a6bf1adad5055 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 12 Aug 2017 15:11:49 -0400 Subject: [PATCH 29/31] SUNRPC: Don't loop forever in xs_tcp_data_receive() Ensure that we don't hog the workqueue thread by requeuing the job every 64 loops. 
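Condensed, the loop introduced below has this shape (byte-count bookkeeping elided); the requeue at the end is what replaces the previous unbounded for (;;), so other work items get a turn on the workqueue thread:

	for (loop = 0; loop < 64; loop++) {
		lock_sock(sk);
		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
		if (read <= 0) {
			clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
			release_sock(sk);
			break;
		}
		release_sock(sk);
		rd_desc.count = 65536;
	}
	/* Still more data?  Reschedule ourselves instead of looping forever. */
	if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
		queue_work(xprtiod_workqueue, &transport->recv_worker);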
Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index a2312f14beb4..e8f44fc76754 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1526,6 +1526,7 @@ static void xs_tcp_data_receive(struct sock_xprt *transport) .arg.data = xprt, }; unsigned long total = 0; + int loop; int read = 0; mutex_lock(&transport->recv_mutex); @@ -1534,20 +1535,20 @@ static void xs_tcp_data_receive(struct sock_xprt *transport) goto out; /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ - for (;;) { + for (loop = 0; loop < 64; loop++) { lock_sock(sk); read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); if (read <= 0) { clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); release_sock(sk); - if (!test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) - break; - } else { - release_sock(sk); - total += read; + break; } + release_sock(sk); + total += read; rd_desc.count = 65536; } + if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) + queue_work(xprtiod_workqueue, &transport->recv_worker); out: mutex_unlock(&transport->recv_mutex); trace_xs_tcp_data_ready(xprt, read, total); From 040249dfbeed9dd553fd323ef4b42c7a3270898b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 13 Aug 2017 02:13:45 -0400 Subject: [PATCH 30/31] SUNRPC: Cleanup xs_tcp_read_common() Simplify the code to avoid a full copy of the struct xdr_skb_reader. Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index e8f44fc76754..a344bea15fc7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1287,25 +1287,12 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, } len = desc->count; - if (len > transport->tcp_reclen - transport->tcp_offset) { - struct xdr_skb_reader my_desc; - - len = transport->tcp_reclen - transport->tcp_offset; - memcpy(&my_desc, desc, sizeof(my_desc)); - my_desc.count = len; - r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, - &my_desc, xdr_skb_read_bits); - desc->count -= r; - desc->offset += r; - } else - r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, + if (len > transport->tcp_reclen - transport->tcp_offset) + desc->count = transport->tcp_reclen - transport->tcp_offset; + r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, desc, xdr_skb_read_bits); - if (r > 0) { - transport->tcp_copied += r; - transport->tcp_offset += r; - } - if (r != len) { + if (desc->count) { /* Error when copying to the receive buffer, * usually because we weren't able to allocate * additional buffer pages. All we can do now @@ -1325,6 +1312,10 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, return; } + transport->tcp_copied += r; + transport->tcp_offset += r; + desc->count = len - r; + dprintk("RPC: XID %08x read %zd bytes\n", ntohl(transport->tcp_xid), r); dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " From ce7c252a8c741aba7c38f817b86e34361f561e42 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 16 Aug 2017 15:30:35 -0400 Subject: [PATCH 31/31] SUNRPC: Add a separate spinlock to protect the RPC request receive list This further reduces contention with the transport_lock, and allows us to convert to using a non-bh-safe spinlock, since the list is now never accessed from a bh context. 
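For reference, the reply-processing order that results is sketched below (drawn from the xprtsock.c hunks in this patch; the cwnd step applies only to the UDP path). recv_lock is taken in process context only, so it needs no _bh variant, while transport_lock is kept for transport state such as the congestion window:

	spin_lock(&xprt->recv_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (req)
		xprt_pin_rqst(req);
	spin_unlock(&xprt->recv_lock);

	/* ... copy the reply into req->rq_private_buf with no lock held ... */

	spin_lock_bh(&xprt->transport_lock);	/* transport state only, e.g. cwnd */
	xprt_adjust_cwnd(xprt, req->rq_task, copied);
	spin_unlock_bh(&xprt->transport_lock);

	spin_lock(&xprt->recv_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->recv_lock);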
Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 1 + net/sunrpc/svcsock.c | 6 ++--- net/sunrpc/xprt.c | 20 +++++++++------ net/sunrpc/xprtrdma/rpc_rdma.c | 8 +++--- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 7 +++-- net/sunrpc/xprtsock.c | 30 ++++++++++++---------- 6 files changed, 41 insertions(+), 31 deletions(-) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 65b9e0224753..a97e6de5f9f2 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -232,6 +232,7 @@ struct rpc_xprt { */ spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ + spinlock_t recv_lock; /* lock receive list */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2b720fa35c4f..272063ca81e8 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1001,7 +1001,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) if (!bc_xprt) return -EAGAIN; - spin_lock_bh(&bc_xprt->transport_lock); + spin_lock(&bc_xprt->recv_lock); req = xprt_lookup_rqst(bc_xprt, xid); if (!req) goto unlock_notfound; @@ -1019,7 +1019,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) memcpy(dst->iov_base, src->iov_base, src->iov_len); xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); rqstp->rq_arg.len = 0; - spin_unlock_bh(&bc_xprt->transport_lock); + spin_unlock(&bc_xprt->recv_lock); return 0; unlock_notfound: printk(KERN_NOTICE @@ -1028,7 +1028,7 @@ unlock_notfound: __func__, ntohl(calldir), bc_xprt, ntohl(xid)); unlock_eagain: - spin_unlock_bh(&bc_xprt->transport_lock); + spin_unlock(&bc_xprt->recv_lock); return -EAGAIN; } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3eb9ec16eec4..2af189c5ac3e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -872,17 +872,17 @@ void xprt_unpin_rqst(struct rpc_rqst *req) } static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req) -__must_hold(&req->rq_xprt->transport_lock) +__must_hold(&req->rq_xprt->recv_lock) { struct rpc_task *task = req->rq_task; if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) { - spin_unlock_bh(&req->rq_xprt->transport_lock); + spin_unlock(&req->rq_xprt->recv_lock); set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate); wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV, TASK_UNINTERRUPTIBLE); clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate); - spin_lock_bh(&req->rq_xprt->transport_lock); + spin_lock(&req->rq_xprt->recv_lock); } } @@ -1008,13 +1008,13 @@ void xprt_transmit(struct rpc_task *task) /* * Add to the list only if we're expecting a reply */ - spin_lock_bh(&xprt->transport_lock); /* Update the softirq receive buffer */ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(req->rq_private_buf)); /* Add request to the receive list */ + spin_lock(&xprt->recv_lock); list_add_tail(&req->rq_list, &xprt->recv); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); xprt_reset_majortimeo(req); /* Turn off autodisconnect */ del_singleshot_timer_sync(&xprt->timer); @@ -1329,15 +1329,18 @@ void xprt_release(struct rpc_task *task) task->tk_ops->rpc_count_stats(task, task->tk_calldata); else if (task->tk_client) rpc_count_iostats(task, task->tk_client->cl_metrics); + spin_lock(&xprt->recv_lock); + if (!list_empty(&req->rq_list)) { + list_del(&req->rq_list); + xprt_wait_on_pinned_rqst(req); + } + 
spin_unlock(&xprt->recv_lock); spin_lock_bh(&xprt->transport_lock); xprt->ops->release_xprt(xprt, task); if (xprt->ops->release_request) xprt->ops->release_request(task); - if (!list_empty(&req->rq_list)) - list_del(&req->rq_list); xprt->last_used = jiffies; xprt_schedule_autodisconnect(xprt); - xprt_wait_on_pinned_rqst(req); spin_unlock_bh(&xprt->transport_lock); if (req->rq_buffer) xprt->ops->buf_free(task); @@ -1361,6 +1364,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) spin_lock_init(&xprt->transport_lock); spin_lock_init(&xprt->reserve_lock); + spin_lock_init(&xprt->recv_lock); INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index ca4d6e4528f3..dfa748a0c8de 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1051,7 +1051,7 @@ rpcrdma_reply_handler(struct work_struct *work) * RPC completion while holding the transport lock to ensure * the rep, rqst, and rq_task pointers remain stable. */ - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); rqst = xprt_lookup_rqst(xprt, headerp->rm_xid); if (!rqst) goto out_norqst; @@ -1136,7 +1136,7 @@ out: xprt_release_rqst_cong(rqst->rq_task); xprt_complete_rqst(rqst->rq_task, status); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n", __func__, xprt, rqst, status); return; @@ -1187,12 +1187,12 @@ out_rdmaerr: r_xprt->rx_stats.bad_reply_count++; goto out; -/* The req was still available, but by the time the transport_lock +/* The req was still available, but by the time the recv_lock * was acquired, the rqst and task had been released. Thus the RPC * has already been terminated. 
*/ out_norqst: - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); rpcrdma_buffer_put(req); dprintk("RPC: %s: race, no rqst left for req %p\n", __func__, req); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index c676ed0efb5a..0d574cda242d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -52,7 +52,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, if (src->iov_len < 24) goto out_shortreply; - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); req = xprt_lookup_rqst(xprt, xid); if (!req) goto out_notfound; @@ -69,17 +69,20 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, else if (credits > r_xprt->rx_buf.rb_bc_max_requests) credits = r_xprt->rx_buf.rb_bc_max_requests; + spin_lock_bh(&xprt->transport_lock); cwnd = xprt->cwnd; xprt->cwnd = credits << RPC_CWNDSHIFT; if (xprt->cwnd > cwnd) xprt_release_rqst_cong(req->rq_task); + spin_unlock_bh(&xprt->transport_lock); + ret = 0; xprt_complete_rqst(req->rq_task, rcvbuf->len); rcvbuf->len = 0; out_unlock: - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); out: return ret; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index a344bea15fc7..2b918137aaa0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -969,12 +969,12 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt, return; /* Look up and lock the request corresponding to the given XID */ - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); rovr = xprt_lookup_rqst(xprt, *xp); if (!rovr) goto out_unlock; xprt_pin_rqst(rovr); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); task = rovr->rq_task; copied = rovr->rq_private_buf.buflen; @@ -983,16 +983,16 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt, if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) { dprintk("RPC: sk_buff copy failed\n"); - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); goto out_unpin; } - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); xprt_complete_rqst(task, copied); out_unpin: xprt_unpin_rqst(rovr); out_unlock: - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); } static void xs_local_data_receive(struct sock_xprt *transport) @@ -1055,12 +1055,12 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, return; /* Look up and lock the request corresponding to the given XID */ - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); rovr = xprt_lookup_rqst(xprt, *xp); if (!rovr) goto out_unlock; xprt_pin_rqst(rovr); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); task = rovr->rq_task; if ((copied = rovr->rq_private_buf.buflen) > repsize) @@ -1069,7 +1069,7 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, /* Suck it into the iovec, verify checksum if not done by hw. 
*/ if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { __UDPX_INC_STATS(sk, UDP_MIB_INERRORS); - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); goto out_unpin; } @@ -1077,11 +1077,13 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, spin_lock_bh(&xprt->transport_lock); xprt_adjust_cwnd(xprt, task, copied); + spin_unlock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); xprt_complete_rqst(task, copied); out_unpin: xprt_unpin_rqst(rovr); out_unlock: - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); } static void xs_udp_data_receive(struct sock_xprt *transport) @@ -1344,24 +1346,24 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid)); /* Find and lock the request corresponding to this xid */ - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); req = xprt_lookup_rqst(xprt, transport->tcp_xid); if (!req) { dprintk("RPC: XID %08x request not found!\n", ntohl(transport->tcp_xid)); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); return -1; } xprt_pin_rqst(req); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); xs_tcp_read_common(xprt, desc, req); - spin_lock_bh(&xprt->transport_lock); + spin_lock(&xprt->recv_lock); if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) xprt_complete_rqst(req->rq_task, transport->tcp_copied); xprt_unpin_rqst(req); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock(&xprt->recv_lock); return 0; }