author		Christoph Hellwig <hch@lst.de>		2021-11-29 11:21:48 +0100
committer	Dan Williams <dan.j.williams@intel.com>	2021-12-04 08:58:52 -0800
commit		60696eb26a37ab0199f7833ddbc1b75138c36d16 (patch)
tree		61e1603ff08e1f39bf0b6baa2425790ab060aa1e /fs/dax.c
parent		429f8de70d9872c5ca9b3914b3c4db5659779331 (diff)
fsdax: simplify the pgoff calculation
Replace the two steps of dax_iomap_sector and bdev_dax_pgoff with a
single dax_iomap_pgoff helper that avoids lots of cumbersome sector
conversions.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-15-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
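The change in the diff below collapses two conversions (bytes to 512-byte sectors in dax_iomap_sector(), then sectors back to a page frame offset in bdev_dax_pgoff()) into one direct physical-address calculation. The following is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages and 512-byte sectors; the function names old_two_step()/new_one_step() and the sample values are invented for illustration and model, rather than reuse, the kernel helpers.

/*
 * Minimal userspace sketch of the address arithmetic, assuming 4 KiB pages
 * and 512-byte sectors.  old_two_step() models the removed
 * dax_iomap_sector() + bdev_dax_pgoff() pair, new_one_step() models the new
 * dax_iomap_pgoff(); names and sample values here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Old scheme: bytes -> 512-byte sector, then back up to a page offset. */
static uint64_t old_two_step(uint64_t iomap_addr, uint64_t iomap_offset,
			     uint64_t pos, uint64_t start_sect)
{
	uint64_t sector = (iomap_addr + (pos & PAGE_MASK) - iomap_offset) >> SECTOR_SHIFT;
	uint64_t phys_off = (start_sect + sector) << SECTOR_SHIFT;

	return phys_off >> PAGE_SHIFT;
}

/* New scheme: stay in bytes and convert to a page frame number once. */
static uint64_t new_one_step(uint64_t iomap_addr, uint64_t iomap_offset,
			     uint64_t pos, uint64_t start_sect)
{
	uint64_t paddr = iomap_addr + (pos & PAGE_MASK) - iomap_offset;

	paddr += start_sect << SECTOR_SHIFT;
	return paddr >> PAGE_SHIFT;	/* what PHYS_PFN() does */
}

int main(void)
{
	/* arbitrary sample values: 1 MiB mapping base, partition at sector 2048 */
	uint64_t addr = 0x100000, off = 0, pos = 0x3456, start_sect = 2048;

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_two_step(addr, off, pos, start_sect),
	       (unsigned long long)new_one_step(addr, off, pos, start_sect));
	return 0;
}

Both paths yield the same page frame number; the new form simply skips the intermediate sector unit and the per-caller error check that bdev_dax_pgoff() required.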
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	35
1 file changed, 10 insertions(+), 25 deletions(-)
@@ -709,23 +709,22 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 	return __dax_invalidate_entry(mapping, index, false);
 }
 
-static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
+static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
 {
-	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
+	phys_addr_t paddr = iomap->addr + (pos & PAGE_MASK) - iomap->offset;
+
+	if (iomap->bdev)
+		paddr += (get_start_sect(iomap->bdev) << SECTOR_SHIFT);
+	return PHYS_PFN(paddr);
 }
 
 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
 {
-	sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
+	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
 	void *vto, *kaddr;
-	pgoff_t pgoff;
 	long rc;
 	int id;
 
-	rc = bdev_dax_pgoff(iter->iomap.bdev, sector, PAGE_SIZE, &pgoff);
-	if (rc)
-		return rc;
-
 	id = dax_read_lock();
 	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
 	if (rc < 0) {
@@ -1013,14 +1012,10 @@ EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
 			 pfn_t *pfnp)
 {
-	const sector_t sector = dax_iomap_sector(iomap, pos);
-	pgoff_t pgoff;
+	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 	int id, rc;
 	long length;
 
-	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
-	if (rc)
-		return rc;
 	id = dax_read_lock();
 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 				   NULL, pfnp);
@@ -1129,7 +1124,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
 	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
-	pgoff_t pgoff;
+	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 	long rc, id;
 	void *kaddr;
 	bool page_aligned = false;
@@ -1140,10 +1135,6 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 	    (size == PAGE_SIZE))
 		page_aligned = true;
 
-	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
-	if (rc)
-		return rc;
-
 	id = dax_read_lock();
 
 	if (page_aligned)
@@ -1169,7 +1160,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 	const struct iomap *iomap = &iomi->iomap;
 	loff_t length = iomap_length(iomi);
 	loff_t pos = iomi->pos;
-	struct block_device *bdev = iomap->bdev;
 	struct dax_device *dax_dev = iomap->dax_dev;
 	loff_t end = pos + length, done = 0;
 	ssize_t ret = 0;
@@ -1203,9 +1193,8 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 	while (pos < end) {
 		unsigned offset = pos & (PAGE_SIZE - 1);
 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
-		const sector_t sector = dax_iomap_sector(iomap, pos);
+		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 		ssize_t map_len;
-		pgoff_t pgoff;
 		void *kaddr;
 
 		if (fatal_signal_pending(current)) {
@@ -1213,10 +1202,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 			break;
 		}
 
-		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
-		if (ret)
-			break;
-
 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
 				&kaddr, NULL);
 		if (map_len < 0) {