author     Trond Myklebust <trond.myklebust@primarydata.com>  2017-07-17 10:34:21 -0400
committer  Trond Myklebust <trond.myklebust@primarydata.com>  2017-08-15 11:54:47 -0400
commit     b5bab9bf91324a7fe21b365d6966cfd087d08e3a (patch)
tree       a7b26c72f233c65257b8008d0366bb98ecf95448 /fs
parent     7e6cca6caf7230b049bd681c5400b01c365ee452 (diff)
NFS: Reduce inode->i_lock contention in nfs_lock_and_join_requests()
We should no longer need the inode->i_lock, now that we've straightened
out the request locking. The locking schema is now:

1) Lock page head request
2) Lock the page group
3) Lock the subrequests one by one

Note that there is a subtle race with nfs_inode_remove_request() due to
the fact that the latter does not lock the page head when removing it
from the struct page. Only the last subrequest is locked, hence we need
to re-check that PagePrivate(page) is still set after we've locked all
the subrequests.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
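For illustration, the following is a minimal user-space sketch of the locking
order and the post-lock re-check described above. The types and names here
(struct page_group, struct subreq, lock_and_join(), page_private) are
hypothetical stand-ins, not the kernel's; the actual patch operates on
struct nfs_page and the page group bit lock in fs/nfs/write.c.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct subreq {
	pthread_mutex_t lock;
	struct subreq *next;		/* circular list through the group */
};

struct page_group {
	pthread_mutex_t head_lock;	/* step 1: page head request lock */
	pthread_mutex_t group_lock;	/* step 2: page group bit lock    */
	struct subreq head;		/* head request of the group      */
	bool page_private;		/* analogue of PagePrivate(page)  */
};

/* Returns the locked head on success, NULL if the page was torn down. */
static struct subreq *lock_and_join(struct page_group *pg)
{
	struct subreq *sub;

	pthread_mutex_lock(&pg->head_lock);	/* 1) lock page head request */
	pthread_mutex_lock(&pg->group_lock);	/* 2) lock the page group    */

	/* 3) lock the subrequests one by one */
	for (sub = pg->head.next; sub != &pg->head; sub = sub->next)
		pthread_mutex_lock(&sub->lock);

	pthread_mutex_unlock(&pg->group_lock);

	/*
	 * Re-check once every subrequest is locked: a concurrent remover
	 * (the analogue of nfs_inode_remove_request()) holds only the last
	 * subrequest lock, not the head lock, so the flag may have been
	 * cleared while the locks above were being taken.
	 */
	if (!pg->page_private) {
		for (sub = pg->head.next; sub != &pg->head; sub = sub->next)
			pthread_mutex_unlock(&sub->lock);
		pthread_mutex_unlock(&pg->head_lock);
		return NULL;
	}
	/* caller still holds head_lock and all subrequest locks */
	return &pg->head;
}

int main(void)
{
	struct page_group pg;
	struct subreq s1, s2;

	pthread_mutex_init(&pg.head_lock, NULL);
	pthread_mutex_init(&pg.group_lock, NULL);
	pthread_mutex_init(&pg.head.lock, NULL);
	pthread_mutex_init(&s1.lock, NULL);
	pthread_mutex_init(&s2.lock, NULL);
	pg.head.next = &s1;
	s1.next = &s2;
	s2.next = &pg.head;
	pg.page_private = true;

	printf("locked head: %p\n", (void *)lock_and_join(&pg));
	return 0;
}

The final check in the sketch mirrors the new PagePrivate(page)/PageSwapCache(page)
test at the end of nfs_lock_and_join_requests() in the diff below, which closes
the race with nfs_inode_remove_request().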
Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/write.c | 40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 08c1ce968cce..ff7c90c7ff79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -372,15 +372,14 @@ nfs_page_group_clear_bits(struct nfs_page *req)
* @head - head request of page group, must be holding head lock
* @req - request that couldn't lock and needs to wait on the req bit lock
*
- * NOTE: this must be called holding page_group bit lock and inode spin lock
- * and BOTH will be released before returning.
+ * NOTE: this must be called holding page_group bit lock
+ * which will be released before returning.
*
* returns 0 on success, < 0 on error.
*/
static int
nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
struct nfs_page *req)
- __releases(&inode->i_lock)
{
struct nfs_page *tmp;
int ret;
@@ -395,7 +394,6 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
kref_get(&req->wb_kref);
nfs_page_group_unlock(head);
- spin_unlock(&inode->i_lock);
/* release ref from nfs_page_find_head_request_locked */
nfs_unlock_and_release_request(head);
@@ -491,8 +489,9 @@ nfs_lock_and_join_requests(struct page *page)
int ret;
try_again:
+ if (!(PagePrivate(page) || PageSwapCache(page)))
+ return NULL;
spin_lock(&inode->i_lock);
-
/*
* A reference is taken only on the head request which acts as a
* reference to the whole page group - the group will not be destroyed
@@ -514,16 +513,12 @@ try_again:
return ERR_PTR(ret);
goto try_again;
}
+ spin_unlock(&inode->i_lock);
- /* holding inode lock, so always make a non-blocking call to try the
- * page group lock */
- ret = nfs_page_group_lock(head, true);
+ ret = nfs_page_group_lock(head, false);
if (ret < 0) {
- spin_unlock(&inode->i_lock);
-
- nfs_page_group_lock_wait(head);
nfs_unlock_and_release_request(head);
- goto try_again;
+ return ERR_PTR(ret);
}
/* lock each request in the page group */
@@ -531,8 +526,10 @@ try_again:
for (subreq = head->wb_this_page; subreq != head;
subreq = subreq->wb_this_page) {
if (!nfs_lock_request(subreq)) {
- /* releases page group bit lock and
- * inode spin lock and all references */
+ /*
+ * releases page group bit lock and
+ * page locks and all references
+ */
ret = nfs_unroll_locks_and_wait(inode, head,
subreq);
@@ -580,7 +577,9 @@ try_again:
if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
set_bit(PG_INODE_REF, &head->wb_flags);
kref_get(&head->wb_kref);
+ spin_lock(&inode->i_lock);
NFS_I(inode)->nrequests++;
+ spin_unlock(&inode->i_lock);
}
/*
@@ -590,11 +589,14 @@ try_again:
nfs_page_group_unlock(head);
- /* drop lock to clean uprequests on destroy list */
- spin_unlock(&inode->i_lock);
-
nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
+ /* Did we lose a race with nfs_inode_remove_request()? */
+ if (!(PagePrivate(page) || PageSwapCache(page))) {
+ nfs_unlock_and_release_request(head);
+ return NULL;
+ }
+
/* still holds ref on head from nfs_page_find_head_request_locked
* and still has lock on head from lock loop */
return head;
@@ -968,7 +970,7 @@ nfs_clear_page_commit(struct page *page)
WB_RECLAIMABLE);
}
-/* Called holding inode (/cinfo) lock */
+/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
@@ -977,9 +979,11 @@ nfs_clear_request_commit(struct nfs_page *req)
struct nfs_commit_info cinfo;
nfs_init_cinfo_from_inode(&cinfo, inode);
+ spin_lock(&inode->i_lock);
if (!pnfs_clear_request_commit(req, &cinfo)) {
nfs_request_remove_commit_list(req, &cinfo);
}
+ spin_unlock(&inode->i_lock);
nfs_clear_page_commit(req->wb_page);
}
}