author	Trond Myklebust <trond.myklebust@primarydata.com>	2017-06-20 19:35:37 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2017-07-13 15:58:05 -0400
commit	919e3bd9a87593520a2c5dfda27bd3e6599852ed (patch)
tree	94bbb463d528d3ecf32967d067a2781b776709bc /include
parent	b5973a8c1ccf375c9ab9e2428e1185e3f799af06 (diff)
NFS: Ensure we commit after writeback is complete
If the page cache is being flushed, then we want to ensure that we do start a commit once the pages are done being flushed. If we just wait until all I/O is done to that file, we can end up livelocking until the balance_dirty_pages() mechanism puts its foot down and forces I/O to stop. So instead we do more or less the same thing that O_DIRECT does, and set up a counter to tell us when the flush is done.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
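The "counter" the changelog describes can be pictured as a small refcounted completion object: each outstanding writeback request holds a reference, and when the last reference drops, a callback fires that starts the COMMIT. The sketch below is only an illustration of that idea, assuming a kref-based design; the helper names and the complete/data fields are modelled on the struct nfs_io_completion forward declaration added in the hunks below, not taken from the patch body, which lives outside this 'include' diffstat.

/*
 * Illustrative sketch only: a refcounted completion object of the kind
 * described in the changelog.  Layout and helper names are assumptions,
 * not the patch's actual implementation.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct nfs_io_completion {
	void (*complete)(void *data);	/* e.g. schedule the COMMIT */
	void *data;
	struct kref refcount;
};

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc =
		container_of(kref, struct nfs_io_completion, refcount);

	/* Last outstanding writeback request is done: run the callback. */
	ioc->complete(ioc->data);
	kfree(ioc);
}

static struct nfs_io_completion *
nfs_io_completion_alloc(void (*complete)(void *), void *data, gfp_t gfp)
{
	struct nfs_io_completion *ioc = kmalloc(sizeof(*ioc), gfp);

	if (ioc) {
		ioc->complete = complete;
		ioc->data = data;
		kref_init(&ioc->refcount);	/* flush path holds the initial reference */
	}
	return ioc;
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	kref_get(&ioc->refcount);	/* one reference per in-flight request */
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	kref_put(&ioc->refcount, nfs_io_completion_release);
}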
Diffstat (limited to 'include')
-rw-r--r--	include/linux/nfs_page.h	1
-rw-r--r--	include/linux/nfs_xdr.h	2
2 files changed, 3 insertions, 0 deletions
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6138cf91346b..abbee2d15dce 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -93,6 +93,7 @@ struct nfs_pageio_descriptor {
 	const struct rpc_call_ops *pg_rpc_callops;
 	const struct nfs_pgio_completion_ops *pg_completion_ops;
 	struct pnfs_layout_segment *pg_lseg;
+	struct nfs_io_completion *pg_io_completion;
 	struct nfs_direct_req *pg_dreq;
 	unsigned int pg_bsize;	/* default bsize for mirrors */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7f1e04941763..89093341f076 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1422,6 +1422,7 @@ enum {
 	NFS_IOHDR_STAT,
 };
+struct nfs_io_completion;
 struct nfs_pgio_header {
 	struct inode *inode;
 	struct rpc_cred *cred;
@@ -1435,6 +1436,7 @@ struct nfs_pgio_header {
 	void (*release) (struct nfs_pgio_header *hdr);
 	const struct nfs_pgio_completion_ops *completion_ops;
 	const struct nfs_rw_ops *rw_ops;
+	struct nfs_io_completion *io_completion;
 	struct nfs_direct_req *dreq;
 	spinlock_t lock;
 	/* fields protected by lock */
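Given the two fields added above, one plausible way to thread the completion object through the write path is for the pageio descriptor to hand pg_io_completion to every header it issues, taking a reference per header, and for the header's completion path to drop that reference. This is a hedged sketch building on the illustration earlier, not the patch's actual fs/nfs changes; the helper names are hypothetical.

/* Hypothetical glue between the descriptor and header fields shown above. */
static void nfs_pgio_attach_io_completion(struct nfs_pageio_descriptor *desc,
					  struct nfs_pgio_header *hdr)
{
	hdr->io_completion = desc->pg_io_completion;
	if (hdr->io_completion)
		nfs_io_completion_get(hdr->io_completion);
}

static void nfs_pgio_header_completed(struct nfs_pgio_header *hdr)
{
	if (hdr->io_completion)
		nfs_io_completion_put(hdr->io_completion);
}

Under this scheme, once the flush path drops its own initial reference, the release callback only runs after every write it covered has completed, so the commit is issued exactly when writeback finishes rather than waiting for all I/O to the file to quiesce, which is what avoids the livelock described in the changelog.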