author    Jeffle Xu <jefflexu@linux.alibaba.com>    2022-04-25 20:21:42 +0800
committer Gao Xiang <hsiangkao@linux.alibaba.com>   2022-05-18 00:11:21 +0800
commit    c665b394b9e8c534e220da876adbd4db990b4c1b (patch)
tree      44e93fc2cebd1827ba3014f825d9e67d71fa2eab /fs/erofs/fscache.c
parent    bd735bdaa62fb64980c07f5443f24aefd0081569 (diff)
erofs: implement fscache-based data readahead
Implement fscache-based data readahead. Also register an individual bdi for each erofs instance to enable readahead.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220425122143.56815-21-jefflexu@linux.alibaba.com
Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
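For context, the .readahead hook added at the bottom of this diff plugs into the generic VFS readahead path: the kernel hands the filesystem a struct readahead_control describing a batch of locked folios, and the implementation must fill, mark uptodate, and unlock every folio it consumes via readahead_folio(). Below is a minimal, hypothetical sketch of that contract only (it zeroes each folio as a stand-in for the fscache/backend read); it is not the erofs code, which additionally maps each range and dispatches reads to the cookie as shown in the diff.

/* Hypothetical illustration of the .readahead contract, not erofs code. */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static void my_readahead(struct readahead_control *rac)
{
	struct folio *folio;

	/* readahead_folio() returns each locked folio in the batch in turn
	 * and advances the readahead window. */
	while ((folio = readahead_folio(rac)) != NULL) {
		/* Stand-in for the real backend read (e.g. an fscache read):
		 * simply zero the folio contents. */
		folio_zero_range(folio, 0, folio_size(folio));
		/* Folios taken via readahead_folio() must be marked uptodate
		 * on success and unlocked by the implementation. */
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
}

static const struct address_space_operations my_aops = {
	.readahead = my_readahead,
};

Folios that are left untouched (not pulled with readahead_folio()) are unlocked by the caller when .readahead() returns, which is why the erofs code above bails out early on mapping errors without touching the remaining folios.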
Diffstat (limited to 'fs/erofs/fscache.c')
-rw-r--r--  fs/erofs/fscache.c  90
1 file changed, 90 insertions, 0 deletions
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 5b779812a5ee..a402d8f0a063 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -162,12 +162,102 @@ out_unlock:
return ret;
}
+static void erofs_fscache_unlock_folios(struct readahead_control *rac,
+ size_t len)
+{
+ while (len) {
+ struct folio *folio = readahead_folio(rac);
+
+ len -= folio_size(folio);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ }
+}
+
+static void erofs_fscache_readahead(struct readahead_control *rac)
+{
+ struct inode *inode = rac->mapping->host;
+ struct super_block *sb = inode->i_sb;
+ size_t len, count, done = 0;
+ erofs_off_t pos;
+ loff_t start, offset;
+ int ret;
+
+ if (!readahead_count(rac))
+ return;
+
+ start = readahead_pos(rac);
+ len = readahead_length(rac);
+
+ do {
+ struct erofs_map_blocks map;
+ struct erofs_map_dev mdev;
+
+ pos = start + done;
+ map.m_la = pos;
+
+ ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (ret)
+ return;
+
+ offset = start + done;
+ count = min_t(size_t, map.m_llen - (pos - map.m_la),
+ len - done);
+
+ if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+ struct iov_iter iter;
+
+ iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
+ offset, count);
+ iov_iter_zero(count, &iter);
+
+ erofs_fscache_unlock_folios(rac, count);
+ ret = count;
+ continue;
+ }
+
+ if (map.m_flags & EROFS_MAP_META) {
+ struct folio *folio = readahead_folio(rac);
+
+ ret = erofs_fscache_readpage_inline(folio, &map);
+ if (!ret) {
+ folio_mark_uptodate(folio);
+ ret = folio_size(folio);
+ }
+
+ folio_unlock(folio);
+ continue;
+ }
+
+ mdev = (struct erofs_map_dev) {
+ .m_deviceid = map.m_deviceid,
+ .m_pa = map.m_pa,
+ };
+ ret = erofs_map_dev(sb, &mdev);
+ if (ret)
+ return;
+
+ ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
+ rac->mapping, offset, count,
+ mdev.m_pa + (pos - map.m_la));
+ /*
+ * For the error cases, the folios will be unlocked when
+ * .readahead() returns.
+ */
+ if (!ret) {
+ erofs_fscache_unlock_folios(rac, count);
+ ret = count;
+ }
+ } while (ret > 0 && ((done += ret) < len));
+}
+
static const struct address_space_operations erofs_fscache_meta_aops = {
.readpage = erofs_fscache_meta_readpage,
};
const struct address_space_operations erofs_fscache_access_aops = {
.readpage = erofs_fscache_readpage,
+ .readahead = erofs_fscache_readahead,
};
int erofs_fscache_register_cookie(struct super_block *sb,