author     David Howells <dhowells@redhat.com>  2020-02-06 14:22:29 +0000
committer  David Howells <dhowells@redhat.com>  2021-04-23 10:17:28 +0100
commit     3003bbd0697b659944237f3459489cb596ba196c (patch)
tree       9549110fad53b5950e1a9ab5851268d48d5f8fdd /fs/afs/write.c
parent     5cbf03985c67c7f0ac8c5382cf5d4d0d630f95f3 (diff)
afs: Use the netfs_write_begin() helper
Make AFS use the new netfs_write_begin() helper to do the pre-reading
required before the write.  If successful, the helper returns with the
required page filled in and locked.  It may read more than just one page,
expanding the read to meet cache granularity requirements as necessary.

Note: A more advanced version of this could be made that does
generic_perform_write() for a whole cache granule.  This would make it
easier to avoid doing the download/read for the data to be overwritten.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-By: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/160588546422.3465195.1546354372589291098.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161539563244.286939.16537296241609909980.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653819291.2770958.406013201547420544.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789102743.6155.17396591236631761195.stgit@warthog.procyon.org.uk/ # v6
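For orientation, the shape of the call path the patch moves to is sketched
below.  This is a minimal, hypothetical example rather than code from the
patch: myfs_write_begin and myfs_req_ops are placeholder names, and the
netfs_write_begin() arguments mirror the call visible in the diff.  The ops
table (struct netfs_read_request_ops in this version of the API) supplies
the hooks that netfs uses to issue the actual reads against the server.

	#include <linux/fs.h>
	#include <linux/netfs.h>

	/* Hypothetical ops table; a real filesystem fills in hooks such
	 * as ->issue_op() to fetch data from its server.
	 */
	extern const struct netfs_read_request_ops myfs_req_ops;

	static int myfs_write_begin(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned int len,
				    unsigned int flags,
				    struct page **pagep, void **fsdata)
	{
		struct page *page;
		int ret;

		/* Grab the page and pre-read whatever parts of it this
		 * write won't overwrite, expanding the read to cache
		 * granule boundaries if necessary; on success the page
		 * is returned locked with those parts filled in.
		 */
		ret = netfs_write_begin(file, mapping, pos, len, flags,
					&page, fsdata, &myfs_req_ops, NULL);
		if (ret < 0)
			return ret;

		/* Filesystem-specific setup, e.g. dirty-region tracking,
		 * goes here.
		 */

		*pagep = page;
		return 0;
	}

The payoff of this pattern is that ->write_end() can assume the page is
already uptodate and drop its own fill-on-short-copy path, which is exactly
what the ASSERT(PageUptodate(page)) added in the diff below expresses.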
Diffstat (limited to 'fs/afs/write.c')
-rw-r--r--  fs/afs/write.c  108
1 file changed, 12 insertions, 96 deletions
diff --git a/fs/afs/write.c b/fs/afs/write.c
index bc84c771b0fd..dc66ff15dd16 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -11,6 +11,8 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
+#include <linux/netfs.h>
+#include <linux/fscache.h>
#include "internal.h"
/*
@@ -23,68 +25,6 @@ int afs_set_page_dirty(struct page *page)
}
/*
- * Handle completion of a read operation to fill a page.
- */
-static void afs_fill_hole(struct afs_read *req)
-{
- if (iov_iter_count(req->iter) > 0)
- /* The read was short - clear the excess buffer. */
- iov_iter_zero(iov_iter_count(req->iter), req->iter);
-}
-
-/*
- * partly or wholly fill a page that's under preparation for writing
- */
-static int afs_fill_page(struct file *file,
- loff_t pos, unsigned int len, struct page *page)
-{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- struct afs_read *req;
- size_t p;
- void *data;
- int ret;
-
- _enter(",,%llu", (unsigned long long)pos);
-
- if (pos >= vnode->vfs_inode.i_size) {
- p = pos & ~PAGE_MASK;
- ASSERTCMP(p + len, <=, PAGE_SIZE);
- data = kmap(page);
- memset(data + p, 0, len);
- kunmap(page);
- return 0;
- }
-
- req = kzalloc(sizeof(struct afs_read), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- refcount_set(&req->usage, 1);
- req->vnode = vnode;
- req->done = afs_fill_hole;
- req->key = key_get(afs_file_key(file));
- req->pos = pos;
- req->len = len;
- req->nr_pages = 1;
- req->iter = &req->def_iter;
- iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len);
-
- ret = afs_fetch_data(vnode, req);
- afs_put_read(req);
- if (ret < 0) {
- if (ret == -ENOENT) {
- _debug("got NOENT from server"
- " - marking file deleted and stale");
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- ret = -ESTALE;
- }
- }
-
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* prepare to perform part of a write to a page
*/
int afs_write_begin(struct file *file, struct address_space *mapping,
@@ -102,24 +42,14 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
_enter("{%llx:%llu},%llx,%x",
vnode->fid.vid, vnode->fid.vnode, pos, len);
- page = grab_cache_page_write_begin(mapping, pos / PAGE_SIZE, flags);
- if (!page)
- return -ENOMEM;
-
- if (!PageUptodate(page) && len != PAGE_SIZE) {
- ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- _leave(" = %d [prep]", ret);
- return ret;
- }
- SetPageUptodate(page);
- }
-
-#ifdef CONFIG_AFS_FSCACHE
- wait_on_page_fscache(page);
-#endif
+ /* Prefetch area to be written into the cache if we're caching this
+ * file. We need to do this before we get a lock on the page in case
+ * there's more than one writer competing for the same cache block.
+ */
+ ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
+ &afs_req_ops, NULL);
+ if (ret < 0)
+ return ret;
index = page->index;
from = pos - index * PAGE_SIZE;
@@ -184,7 +114,6 @@ int afs_write_end(struct file *file, struct address_space *mapping,
unsigned int f, from = pos & (thp_size(page) - 1);
unsigned int t, to = from + copied;
loff_t i_size, maybe_i_size;
- int ret = 0;
_enter("{%llx:%llu},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -203,19 +132,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_sequnlock(&vnode->cb_lock);
}
- if (!PageUptodate(page)) {
- if (copied < len) {
- /* Try and load any missing data from the server. The
- * unmarshalling routine will take care of clearing any
- * bits that are beyond the EOF.
- */
- ret = afs_fill_page(file, pos + copied,
- len - copied, page);
- if (ret < 0)
- goto out;
- }
- SetPageUptodate(page);
- }
+ ASSERT(PageUptodate(page));
if (PagePrivate(page)) {
priv = page_private(page);
@@ -236,12 +153,11 @@ int afs_write_end(struct file *file, struct address_space *mapping,
if (set_page_dirty(page))
_debug("dirtied %lx", page->index);
- ret = copied;
out:
unlock_page(page);
put_page(page);
- return ret;
+ return copied;
}
/*