author     Christoph Hellwig <hch@lst.de>            2021-11-29 11:21:52 +0100
committer  Dan Williams <dan.j.williams@intel.com>   2021-12-04 08:58:53 -0800
commit     c6f40468657d16e4010ef84bf32a761feb3469ea
tree       b86852a866cadaac4e68c67b7a5e3419ad32d588 /fs/dax.c
parent     e5c71954ca11df04d258a663a8a15262be0e17f6
fsdax: decouple zeroing from the iomap buffered I/O code
Unshare the DAX and iomap buffered I/O page zeroing code. This code
previously did an IS_DAX check deep inside the iomap code, which in
fact was the only DAX check in the code. Instead move these checks
into the callers. Most callers already have DAX special casing anyway
and XFS will need it for reflink support as well.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-19-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
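
For context, the caller-side DAX special casing the message refers to ends up
looking roughly like the sketch below. foo_zero_range() and foo_iomap_ops are
hypothetical placeholder names, not symbols introduced by this patch; the
actual caller conversions (e.g. ext2, ext4, XFS) happen in other patches of
this series.

/*
 * Hedged sketch of the caller-side dispatch this patch enables.
 * foo_zero_range() and foo_iomap_ops are illustrative placeholders.
 */
static int foo_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero)
{
	/* DAX bypasses the page cache, so take the DAX zeroing path. */
	if (IS_DAX(inode))
		return dax_zero_range(inode, pos, len, did_zero,
				&foo_iomap_ops);
	return iomap_zero_range(inode, pos, len, did_zero,
			&foo_iomap_ops);
}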
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c | 77
1 file changed, 63 insertions(+), 14 deletions(-)
@@ -1135,24 +1135,73 @@ static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
+static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
 {
-	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
-	long rc, id;
-	unsigned offset = offset_in_page(pos);
-	unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+	const struct iomap *iomap = &iter->iomap;
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	loff_t pos = iter->pos;
+	u64 length = iomap_length(iter);
+	s64 written = 0;
+
+	/* already zeroed?  we're done. */
+	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+		return length;
+
+	do {
+		unsigned offset = offset_in_page(pos);
+		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+		long rc;
+		int id;
 
-	id = dax_read_lock();
-	if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
-		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
-	else
-		rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
-	dax_read_unlock(id);
+		id = dax_read_lock();
+		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
+			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+		else
+			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
+		dax_read_unlock(id);
 
-	if (rc < 0)
-		return rc;
-	return size;
+		if (rc < 0)
+			return rc;
+		pos += size;
+		length -= size;
+		written += size;
+		if (did_zero)
+			*did_zero = true;
+	} while (length > 0);
+
+	return written;
+}
+
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+		const struct iomap_ops *ops)
+{
+	struct iomap_iter iter = {
+		.inode		= inode,
+		.pos		= pos,
+		.len		= len,
+		.flags		= IOMAP_ZERO,
+	};
+	int ret;
+
+	while ((ret = iomap_iter(&iter, ops)) > 0)
+		iter.processed = dax_zero_iter(&iter, did_zero);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dax_zero_range);
+
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+		const struct iomap_ops *ops)
+{
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int off = pos & (blocksize - 1);
+
+	/* Block boundary? Nothing to do */
+	if (!off)
+		return 0;
+	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
 }
+EXPORT_SYMBOL_GPL(dax_truncate_page);
 
 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 		struct iov_iter *iter)
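
As a usage illustration, a filesystem truncate path would call the new helper
along these lines. This is a hedged sketch: foo_setsize() and foo_iomap_ops
are hypothetical names, not code from this patch; truncate_setsize() is the
existing VFS helper.

/*
 * Hedged usage sketch of dax_truncate_page() in a hypothetical
 * filesystem's truncate path.
 */
static int foo_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	/*
	 * Zero the partial block at the new EOF so no stale data is
	 * exposed if the file is later extended; for a block-aligned
	 * size, dax_truncate_page() is a no-op.  Passing NULL for
	 * did_zero is allowed, as dax_zero_iter() checks it.
	 */
	error = dax_truncate_page(inode, newsize, NULL, &foo_iomap_ops);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}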