author    | David Howells <dhowells@redhat.com> | 2020-02-06 14:22:29 +0000
committer | David Howells <dhowells@redhat.com> | 2021-04-23 10:17:28 +0100
commit    | 5cbf03985c67c7f0ac8c5382cf5d4d0d630f95f3 (patch)
tree      | d367003f6b2317b8a0dcc320dd5c6af1ef8c2959 /fs/afs/file.c
parent    | dc4191841d0998978349e9119ab2ccb080b8b957 (diff)
afs: Use new netfs lib read helper API
Make AFS use the new netfs read helpers to implement the VM read
operations:
- afs_readpage() now hands off responsibility to netfs_readpage().
- afs_readpages() is gone and replaced with afs_readahead().
- afs_readahead() just hands off responsibility to netfs_readahead().
The netfs helpers make use of the cache if a cookie is supplied; otherwise
they just call the ->issue_op() method as many times as needed to complete
the entire request.
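
As a purely illustrative sketch (not part of the commit), the hand-off pattern
looks roughly like this for a hypothetical filesystem. The myfs_* names and the
myfs_fetch() helper are placeholders; netfs_readpage(), netfs_readahead(),
netfs_subreq_terminated() and the ->issue_op() hook are the interfaces this
patch wires up for AFS in the diff below.

#include <linux/fs.h>
#include <linux/netfs.h>
#include <linux/pagemap.h>

/* Placeholder: fetch @len bytes at @pos from the server; returns the number
 * of bytes transferred or a negative error code.
 */
static ssize_t myfs_fetch(struct inode *inode, loff_t pos, size_t len);

/* Called by the netfs library for each slice of the request that has to come
 * from the server rather than from the cache.
 */
static void myfs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	ssize_t ret = myfs_fetch(subreq->rreq->inode,
				 subreq->start + subreq->transferred,
				 subreq->len - subreq->transferred);

	/* Tell the library how much arrived, or the error. */
	netfs_subreq_terminated(subreq, ret, false);
}

/* Only ->issue_op() is shown; the AFS conversion below also wires up
 * init_rreq, begin_cache_operation and cleanup.
 */
static const struct netfs_read_request_ops myfs_req_ops = {
	.issue_op	= myfs_req_issue_op,
};

static int myfs_readpage(struct file *file, struct page *page)
{
	return netfs_readpage(file, page, &myfs_req_ops, NULL);
}

static void myfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &myfs_req_ops, NULL);
}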
Changes:
v5:
- Use proper wait function for PG_fscache in afs_page_mkwrite()[1].
- Use killable wait for PG_writeback in afs_page_mkwrite()[1].
v4:
- Folded in error handling fixes to afs_req_issue_op().
- Added a flag to netfs_subreq_terminated() to indicate that the caller may
  have been running async, so that anything that might sleep needs punting to
  a workqueue (see the sketch after this changelog).
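
To illustrate the v4 flag noted above, here is a hypothetical sketch (not part
of the commit): the third argument to netfs_subreq_terminated() says whether
the completion may be running in a context that cannot sleep, in which case the
library punts further work that might sleep to a workqueue. The myfs_* names
are placeholders.

#include <linux/netfs.h>

/* Completion reached from a call chain that is allowed to sleep. */
static void myfs_read_complete_sync(struct netfs_read_subrequest *subreq,
				    ssize_t transferred_or_error)
{
	netfs_subreq_terminated(subreq, transferred_or_error, false);
}

/* Completion reached from an asynchronous RPC reply handler that must not
 * sleep: passing true asks the library to defer further processing that
 * might sleep to a workqueue.
 */
static void myfs_read_complete_async(struct netfs_read_subrequest *subreq,
				     ssize_t transferred_or_error)
{
	netfs_subreq_terminated(subreq, transferred_or_error, true);
}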
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-By: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/2499407.1616505440@warthog.procyon.org.uk [1]
Link: https://lore.kernel.org/r/160588542733.3465195.7526541422073350302.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161118158436.1232039.3884845981224091996.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161161053540.2537118.14904446369309535330.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/161340418739.1303470.5908092911600241280.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/161539561926.286939.5729036262354802339.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653817977.2770958.17696456811587237197.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789101258.6155.3879271028895121537.stgit@warthog.procyon.org.uk/ # v6
Diffstat (limited to 'fs/afs/file.c')
-rw-r--r-- | fs/afs/file.c | 327
1 file changed, 78 insertions, 249 deletions
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 2db810467d3f..10c6eaaac2cc 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -14,6 +14,7 @@
 #include <linux/gfp.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/mm.h>
+#include <linux/netfs.h>
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -22,8 +23,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
 			       unsigned int length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
-static int afs_readpages(struct file *filp, struct address_space *mapping,
-			 struct list_head *pages, unsigned nr_pages);
+static void afs_readahead(struct readahead_control *ractl);
 
 const struct file_operations afs_file_operations = {
 	.open		= afs_open,
@@ -47,7 +47,7 @@ const struct inode_operations afs_file_inode_operations = {
 
 const struct address_space_operations afs_fs_aops = {
 	.readpage	= afs_readpage,
-	.readpages	= afs_readpages,
+	.readahead	= afs_readahead,
 	.set_page_dirty	= afs_set_page_dirty,
 	.launder_page	= afs_launder_page,
 	.releasepage	= afs_releasepage,
@@ -184,61 +184,17 @@ int afs_release(struct inode *inode, struct file *file)
 }
 
 /*
- * Handle completion of a read operation.
+ * Allocate a new read record.
  */
-static void afs_file_read_done(struct afs_read *req)
+struct afs_read *afs_alloc_read(gfp_t gfp)
 {
-	struct afs_vnode *vnode = req->vnode;
-	struct page *page;
-	pgoff_t index = req->pos >> PAGE_SHIFT;
-	pgoff_t last = index + req->nr_pages - 1;
-
-	XA_STATE(xas, &vnode->vfs_inode.i_mapping->i_pages, index);
-
-	if (iov_iter_count(req->iter) > 0) {
-		/* The read was short - clear the excess buffer. */
-		_debug("afterclear %zx %zx %llx/%llx",
-		       req->iter->iov_offset,
-		       iov_iter_count(req->iter),
-		       req->actual_len, req->len);
-		iov_iter_zero(iov_iter_count(req->iter), req->iter);
-	}
-
-	rcu_read_lock();
-	xas_for_each(&xas, page, last) {
-		page_endio(page, false, 0);
-		put_page(page);
-	}
-	rcu_read_unlock();
-
-	task_io_account_read(req->len);
-	req->cleanup = NULL;
-}
-
-/*
- * Dispose of our locks and refs on the pages if the read failed.
- */
-static void afs_file_read_cleanup(struct afs_read *req)
-{
-	struct page *page;
-	pgoff_t index = req->pos >> PAGE_SHIFT;
-	pgoff_t last = index + req->nr_pages - 1;
-
-	if (req->iter) {
-		XA_STATE(xas, &req->vnode->vfs_inode.i_mapping->i_pages, index);
-
-		_enter("%lu,%u,%zu", index, req->nr_pages, iov_iter_count(req->iter));
+	struct afs_read *req;
 
-		rcu_read_lock();
-		xas_for_each(&xas, page, last) {
-			BUG_ON(xa_is_value(page));
-			BUG_ON(PageCompound(page));
+	req = kzalloc(sizeof(struct afs_read), gfp);
+	if (req)
+		refcount_set(&req->usage, 1);
 
-			page_endio(page, false, req->error);
-			put_page(page);
-		}
-		rcu_read_unlock();
-	}
+	return req;
 }
 
 /*
@@ -257,14 +213,20 @@ void afs_put_read(struct afs_read *req)
 static void afs_fetch_data_notify(struct afs_operation *op)
 {
 	struct afs_read *req = op->fetch.req;
+	struct netfs_read_subrequest *subreq = req->subreq;
 	int error = op->error;
 
 	if (error == -ECONNABORTED)
 		error = afs_abort_to_error(op->ac.abort_code);
 	req->error = error;
 
-	if (req->done)
+	if (subreq) {
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+		netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
+		req->subreq = NULL;
+	} else if (req->done) {
 		req->done(req);
+	}
 }
 
 static void afs_fetch_data_success(struct afs_operation *op)
@@ -308,8 +270,11 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
 	       key_serial(req->key));
 
 	op = afs_alloc_operation(req->key, vnode->volume);
-	if (IS_ERR(op))
+	if (IS_ERR(op)) {
+		if (req->subreq)
+			netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
 		return PTR_ERR(op);
+	}
 
 	afs_op_set_vnode(op, 0, vnode);
 
@@ -318,222 +283,86 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
 	return afs_do_sync_operation(op);
 }
 
-/*
- * read page from file, directory or symlink, given a key to use
- */
-static int afs_page_filler(struct key *key, struct page *page)
+static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
 {
-	struct inode *inode = page->mapping->host;
-	struct afs_vnode *vnode = AFS_FS_I(inode);
-	struct afs_read *req;
-	int ret;
-
-	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
+	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+	struct afs_read *fsreq;
 
-	BUG_ON(!PageLocked(page));
-
-	ret = -ESTALE;
-	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-		goto error;
+	fsreq = afs_alloc_read(GFP_NOFS);
+	if (!fsreq)
+		return netfs_subreq_terminated(subreq, -ENOMEM, false);
 
-	req = kzalloc(sizeof(struct afs_read), GFP_KERNEL);
-	if (!req)
-		goto enomem;
-
-	refcount_set(&req->usage, 1);
-	req->vnode = vnode;
-	req->key = key_get(key);
-	req->pos = (loff_t)page->index << PAGE_SHIFT;
-	req->len = thp_size(page);
-	req->nr_pages = thp_nr_pages(page);
-	req->done = afs_file_read_done;
-	req->cleanup = afs_file_read_cleanup;
-
-	get_page(page);
-	iov_iter_xarray(&req->def_iter, READ, &page->mapping->i_pages,
-			req->pos, req->len);
-	req->iter = &req->def_iter;
-
-	ret = afs_fetch_data(vnode, req);
-	if (ret < 0)
-		goto fetch_error;
+	fsreq->subreq = subreq;
+	fsreq->pos = subreq->start + subreq->transferred;
+	fsreq->len = subreq->len - subreq->transferred;
+	fsreq->key = subreq->rreq->netfs_priv;
+	fsreq->vnode = vnode;
+	fsreq->iter = &fsreq->def_iter;
 
-	afs_put_read(req);
-	_leave(" = 0");
-	return 0;
+	iov_iter_xarray(&fsreq->def_iter, READ,
+			&fsreq->vnode->vfs_inode.i_mapping->i_pages,
+			fsreq->pos, fsreq->len);
 
-fetch_error:
-	switch (ret) {
-	case -EINTR:
-	case -ENOMEM:
-	case -ERESTARTSYS:
-	case -EAGAIN:
-		afs_put_read(req);
-		goto error;
-	case -ENOENT:
-		_debug("got NOENT from server - marking file deleted and stale");
-		set_bit(AFS_VNODE_DELETED, &vnode->flags);
-		ret = -ESTALE;
-		/* Fall through */
-	default:
-		page_endio(page, false, ret);
-		afs_put_read(req);
-		_leave(" = %d", ret);
-		return ret;
-	}
-
-enomem:
-	ret = -ENOMEM;
-error:
-	unlock_page(page);
-	_leave(" = %d", ret);
-	return ret;
+	afs_fetch_data(fsreq->vnode, fsreq);
 }
 
-/*
- * read page from file, directory or symlink, given a file to nominate the key
- * to be used
- */
-static int afs_readpage(struct file *file, struct page *page)
+static int afs_symlink_readpage(struct page *page)
 {
-	struct key *key;
+	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+	struct afs_read *fsreq;
 	int ret;
 
-	if (file) {
-		key = afs_file_key(file);
-		ASSERT(key != NULL);
-		ret = afs_page_filler(key, page);
-	} else {
-		struct inode *inode = page->mapping->host;
-		key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
-		if (IS_ERR(key)) {
-			ret = PTR_ERR(key);
-		} else {
-			ret = afs_page_filler(key, page);
-			key_put(key);
-		}
-	}
-	return ret;
-}
-
-/*
- * Read a contiguous set of pages.
- */
-static int afs_readpages_one(struct file *file, struct address_space *mapping,
-			     struct list_head *pages)
-{
-	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-	struct afs_read *req;
-	struct list_head *p;
-	struct page *first, *page;
-	pgoff_t index;
-	int ret, n;
-
-	/* Count the number of contiguous pages at the front of the list.  Note
-	 * that the list goes prev-wards rather than next-wards.
-	 */
-	first = lru_to_page(pages);
-	index = first->index + 1;
-	n = 1;
-	for (p = first->lru.prev; p != pages; p = p->prev) {
-		page = list_entry(p, struct page, lru);
-		if (page->index != index)
-			break;
-		index++;
-		n++;
-	}
-
-	req = kzalloc(sizeof(struct afs_read), GFP_NOFS);
-	if (!req)
+	fsreq = afs_alloc_read(GFP_NOFS);
+	if (!fsreq)
 		return -ENOMEM;
 
-	refcount_set(&req->usage, 1);
-	req->vnode = vnode;
-	req->key = key_get(afs_file_key(file));
-	req->done = afs_file_read_done;
-	req->cleanup = afs_file_read_cleanup;
-	req->pos = first->index;
-	req->pos <<= PAGE_SHIFT;
-
-	/* Add pages to the LRU until it fails.  We keep the pages ref'd and
-	 * locked until the read is complete.
-	 *
-	 * Note that it's possible for the file size to change whilst we're
-	 * doing this, but we rely on the server returning less than we asked
-	 * for if the file shrank.  We also rely on this to deal with a partial
-	 * page at the end of the file.
-	 */
-	do {
-		page = lru_to_page(pages);
-		list_del(&page->lru);
-		index = page->index;
-		if (add_to_page_cache_lru(page, mapping, index,
-					  readahead_gfp_mask(mapping))) {
-			put_page(page);
-			break;
-		}
-
-		req->nr_pages++;
-	} while (req->nr_pages < n);
-
-	if (req->nr_pages == 0) {
-		afs_put_read(req);
-		return 0;
-	}
-
-	req->len = req->nr_pages * PAGE_SIZE;
-	iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages,
-			req->pos, req->len);
-	req->iter = &req->def_iter;
+	fsreq->pos = page->index * PAGE_SIZE;
+	fsreq->len = PAGE_SIZE;
+	fsreq->vnode = vnode;
+	fsreq->iter = &fsreq->def_iter;
+	iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
+			fsreq->pos, fsreq->len);
 
-	ret = afs_fetch_data(vnode, req);
-	if (ret < 0)
-		goto error;
+	ret = afs_fetch_data(fsreq->vnode, fsreq);
+	page_endio(page, false, ret);
+	return ret;
+}
 
-	afs_put_read(req);
-	return 0;
+static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+{
+	rreq->netfs_priv = key_get(afs_file_key(file));
+}
 
-error:
-	if (ret == -ENOENT) {
-		_debug("got NOENT from server - marking file deleted and stale");
-		set_bit(AFS_VNODE_DELETED, &vnode->flags);
-		ret = -ESTALE;
-	}
+static int afs_begin_cache_operation(struct netfs_read_request *rreq)
+{
+	struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
 
-	afs_put_read(req);
-	return ret;
+	return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode));
 }
 
-/*
- * read a set of pages
- */
-static int afs_readpages(struct file *file, struct address_space *mapping,
-			 struct list_head *pages, unsigned nr_pages)
+static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
 {
-	struct key *key = afs_file_key(file);
-	struct afs_vnode *vnode;
-	int ret = 0;
-
-	_enter("{%d},{%lu},,%d",
-	       key_serial(key), mapping->host->i_ino, nr_pages);
+	key_put(netfs_priv);
+}
 
-	ASSERT(key != NULL);
+static const struct netfs_read_request_ops afs_req_ops = {
+	.init_rreq		= afs_init_rreq,
	.begin_cache_operation	= afs_begin_cache_operation,
+	.issue_op		= afs_req_issue_op,
+	.cleanup		= afs_priv_cleanup,
+};
 
-	vnode = AFS_FS_I(mapping->host);
-	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
-		_leave(" = -ESTALE");
-		return -ESTALE;
-	}
+static int afs_readpage(struct file *file, struct page *page)
+{
+	if (!file)
+		return afs_symlink_readpage(page);
 
-	/* attempt to read as many of the pages as possible */
-	while (!list_empty(pages)) {
-		ret = afs_readpages_one(file, mapping, pages);
-		if (ret < 0)
-			break;
-	}
+	return netfs_readpage(file, page, &afs_req_ops, NULL);
+}
 
-	_leave(" = %d [netting]", ret);
-	return ret;
+static void afs_readahead(struct readahead_control *ractl)
+{
+	netfs_readahead(ractl, &afs_req_ops, NULL);
 }
 
 /*