author    Linus Torvalds <torvalds@linux-foundation.org>  2022-05-27 15:49:30 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-05-27 15:49:30 -0700
commit    35cdd8656eac470b9abc9de8d4bd268fbc0fb34b
tree      261e2d2fc3dc5059ce3ffaf9bf231109bbbd6d0e
parent    ea6c3bc6ed93dd032568427e92424f6d33deb99b
parent    f42e8e5088b9e791c8f7ac661f68e29a4996a4e3
Merge tag 'libnvdimm-for-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm and DAX updates from Dan Williams:
 "New support for clearing memory errors when a file is in DAX mode,
  alongside some other fixes and cleanups.

  Previously it was only possible to clear these errors using a
  truncate or hole-punch operation to trigger the filesystem to
  reallocate the block; now any page-aligned write can opportunistically
  clear errors as well.

  This change spans x86/mm, nvdimm, and fs/dax, and has received the
  appropriate sign-offs. Thanks to Jane for her work on this.

  Summary:

   - Add support for clearing memory errors via pwrite(2) on DAX

   - Fix 'security overwrite' support in the presence of media errors

   - Miscellaneous cleanups and fixes for nfit_test (nvdimm unit tests)"

* tag 'libnvdimm-for-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  pmem: implement pmem_recovery_write()
  pmem: refactor pmem_clear_poison()
  dax: add .recovery_write dax_operation
  dax: introduce DAX_RECOVERY_WRITE dax access mode
  mce: fix set_mce_nospec to always unmap the whole page
  x86/mce: relocate set{clear}_mce_nospec() functions
  acpi/nfit: rely on mce->misc to determine poison granularity
  testing: nvdimm: asm/mce.h is not needed in nfit.c
  testing: nvdimm: iomap: make __nfit_test_ioremap a macro
  nvdimm: Allow overwrite in the presence of disabled dimms
  tools/testing/nvdimm: remove unneeded flush_workqueue
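For illustration, here is a minimal userspace sketch of the recovery path the
message describes, assuming a file on a DAX-mounted filesystem; the path,
offset, and length are hypothetical, and only a page-aligned, page-sized
write takes the new error-clearing path:

/*
 * Hypothetical sketch: recover from a media error on a DAX file by
 * rewriting the affected page with a page-aligned pwrite(2). The file
 * path and offset are made up for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *buf;
	int fd;

	fd = open("/mnt/pmem/data", O_RDWR);	/* file on a DAX mount */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The buffer and length must cover whole pages. */
	if (posix_memalign(&buf, pagesz, pagesz)) {
		perror("posix_memalign");
		return 1;
	}
	memset(buf, 0, pagesz);

	/*
	 * A page-aligned pwrite over the poisoned page can now clear the
	 * error opportunistically instead of failing with EIO.
	 */
	if (pwrite(fd, buf, pagesz, 0) != pagesz)
		perror("pwrite");

	free(buf);
	close(fd);
	return 0;
}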
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit/mce.c       |   4
-rw-r--r--  drivers/dax/super.c           |  14
-rw-r--r--  drivers/md/dm-linear.c        |  15
-rw-r--r--  drivers/md/dm-log-writes.c    |  15
-rw-r--r--  drivers/md/dm-stripe.c        |  15
-rw-r--r--  drivers/md/dm-target.c        |   4
-rw-r--r--  drivers/md/dm-writecache.c    |   7
-rw-r--r--  drivers/md/dm.c               |  25
-rw-r--r--  drivers/nvdimm/pmem.c         | 203
-rw-r--r--  drivers/nvdimm/pmem.h         |   5
-rw-r--r--  drivers/nvdimm/security.c     |   5
-rw-r--r--  drivers/s390/block/dcssblk.c  |   9
12 files changed, 238 insertions(+), 83 deletions(-)
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index ee8d9973f60b..d48a388b796e 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -32,6 +32,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
*/
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
+ unsigned int align = 1UL << MCI_MISC_ADDR_LSB(mce->misc);
struct device *dev = acpi_desc->dev;
int found_match = 0;
@@ -63,8 +64,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
/* If this fails due to an -ENOMEM, there is little we can do */
nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus,
- ALIGN(mce->addr, L1_CACHE_BYTES),
- L1_CACHE_BYTES);
+ ALIGN_DOWN(mce->addr, align), align);
nvdimm_region_notify(nfit_spa->nd_region,
NVDIMM_REVALIDATE_POISON);
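To make the granularity change above concrete, here is a small standalone
sketch of the new calculation with a made-up mce record; MCI_MISC_ADDR_LSB
masks the low 6 bits of mce->misc (as in arch/x86), and the values below are
hypothetical:

/*
 * Hedged illustration (not from this diff): how the badrange extent is
 * now derived from the poison granularity recorded in mce->misc rather
 * than assumed to be one cacheline.
 */
#include <stdio.h>

#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)	/* low 6 bits, per arch/x86 */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long long misc = 0x8c;		/* hypothetical: LSB field = 12 */
	unsigned long long addr = 0x1234567;	/* hypothetical poisoned address */
	unsigned long long align = 1ULL << MCI_MISC_ADDR_LSB(misc);

	/* With 4KiB granularity the bad range is [0x1234000, 0x1235000). */
	printf("badrange start=%#llx len=%#llx\n",
	       ALIGN_DOWN(addr, align), align);
	return 0;
}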
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0211e6f7b47a..50a08b2ec247 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -117,6 +117,7 @@ enum dax_device_flags {
* @dax_dev: a dax_device instance representing the logical memory range
* @pgoff: offset in pages from the start of the device to translate
* @nr_pages: number of consecutive pages caller can handle relative to @pfn
+ * @mode: whether this is a normal access or a recovery write
* @kaddr: output parameter that returns a virtual address mapping of pfn
* @pfn: output parameter that returns an absolute pfn translation of @pgoff
*
@@ -124,7 +125,7 @@ enum dax_device_flags {
* pages accessible at the device relative @pgoff.
*/
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn)
+ enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
{
long avail;
@@ -138,7 +139,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
return -EINVAL;
avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
- kaddr, pfn);
+ mode, kaddr, pfn);
if (!avail)
return -ERANGE;
return min(avail, nr_pages);
@@ -194,6 +195,15 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter)
+{
+ if (!dax_dev->ops->recovery_write)
+ return 0;
+ return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
+}
+EXPORT_SYMBOL_GPL(dax_recovery_write);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
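A hedged sketch of how a caller is expected to combine the new access mode
with dax_recovery_write(), loosely modeled on the fs/dax write path (which is
outside this drivers diff); the function name is made up and error handling
is trimmed:

/*
 * Sketch, not from this diff: normal access fails with -EIO on known
 * poison; the caller then remaps in DAX_RECOVERY_WRITE mode and lets
 * the driver clear the poison as part of the write.
 */
#include <linux/dax.h>
#include <linux/uio.h>

static ssize_t dax_write_page(struct dax_device *dax_dev, pgoff_t pgoff,
		struct iov_iter *iter, size_t bytes)
{
	void *kaddr;
	long map_len;

	map_len = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS,
			&kaddr, NULL);
	if (map_len > 0)
		return _copy_from_iter_flushcache(kaddr, bytes, iter);
	if (map_len != -EIO)
		return map_len;

	/* Remap in recovery mode, then let the driver recover the range. */
	map_len = dax_direct_access(dax_dev, pgoff, 1, DAX_RECOVERY_WRITE,
			&kaddr, NULL);
	if (map_len < 0)
		return map_len;
	return dax_recovery_write(dax_dev, pgoff, kaddr, bytes, iter);
}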
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 0a6abbbe3745..3212ef6aa81b 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -165,11 +165,12 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
}
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -180,9 +181,18 @@ static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
+static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
+ return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define linear_dax_direct_access NULL
#define linear_dax_zero_page_range NULL
+#define linear_dax_recovery_write NULL
#endif
static struct target_type linear_target = {
@@ -200,6 +210,7 @@ static struct target_type linear_target = {
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
.dax_zero_page_range = linear_dax_zero_page_range,
+ .dax_recovery_write = linear_dax_recovery_write,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index e194226c89e5..20fd688f72e7 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -888,11 +888,12 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
}
static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -903,9 +904,18 @@ static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
}
+static size_t log_writes_dax_recovery_write(struct dm_target *ti,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
+
+ return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_zero_page_range NULL
+#define log_writes_dax_recovery_write NULL
#endif
static struct target_type log_writes_target = {
@@ -923,6 +933,7 @@ static struct target_type log_writes_target = {
.io_hints = log_writes_io_hints,
.direct_access = log_writes_dax_direct_access,
.dax_zero_page_range = log_writes_dax_zero_page_range,
+ .dax_recovery_write = log_writes_dax_recovery_write,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c81d331d1afe..baa085cc67bd 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -315,11 +315,12 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
}
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -330,9 +331,18 @@ static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
+static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
+
+ return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define stripe_dax_direct_access NULL
#define stripe_dax_zero_page_range NULL
+#define stripe_dax_recovery_write NULL
#endif
/*
@@ -469,6 +479,7 @@ static struct target_type stripe_target = {
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
.dax_zero_page_range = stripe_dax_zero_page_range,
+ .dax_recovery_write = stripe_dax_recovery_write,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 64dd0b34fcf4..8cd5184e62f0 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#define DM_MSG_PREFIX "target"
@@ -142,7 +143,8 @@ static void io_err_release_clone_rq(struct request *clone,
}
static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
return -EIO;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5630b470ba42..d74c5a7a0ab4 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -286,7 +286,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
id = dax_read_lock();
- da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
+ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, DAX_ACCESS,
+ &wc->memory_map, &pfn);
if (da < 0) {
wc->memory_map = NULL;
r = da;
@@ -308,8 +309,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
- daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
- NULL, &pfn);
+ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
+ p - i, DAX_ACCESS, NULL, &pfn);
if (daa <= 0) {
r = daa ? daa : -EINVAL;
goto err3;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d62f1354ecbf..dfb0a551bd88 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1143,7 +1143,8 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
}
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
@@ -1161,7 +1162,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (len < 1)
goto out;
nr_pages = min(len, nr_pages);
- ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+ ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
out:
dm_put_live_table(md, srcu_idx);
@@ -1196,6 +1197,25 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
+static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ int srcu_idx;
+ long ret = 0;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+ if (!ti || !ti->type->dax_recovery_write)
+ goto out;
+
+ ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+}
+
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
@@ -3231,6 +3251,7 @@ static const struct block_device_operations dm_rq_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
.zero_page_range = dm_dax_zero_page_range,
+ .recovery_write = dm_dax_recovery_write,
};
/*
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 58d95242a836..6b24ecada695 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -45,9 +45,25 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static void hwpoison_clear(struct pmem_device *pmem,
- phys_addr_t phys, unsigned int len)
+static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
+ return pmem->phys_addr + offset;
+}
+
+static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
+{
+ return (offset - pmem->data_offset) >> SECTOR_SHIFT;
+}
+
+static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
+{
+ return (sector << SECTOR_SHIFT) + pmem->data_offset;
+}
+
+static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
+ unsigned int len)
+{
+ phys_addr_t phys = to_phys(pmem, offset);
unsigned long pfn_start, pfn_end, pfn;
/* only pmem in the linear map supports HWPoison */
@@ -69,33 +85,40 @@ static void hwpoison_clear(struct pmem_device *pmem,
}
}
-static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
- phys_addr_t offset, unsigned int len)
+static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
- struct device *dev = to_dev(pmem);
- sector_t sector;
- long cleared;
- blk_status_t rc = BLK_STS_OK;
+ if (blks == 0)
+ return;
+ badblocks_clear(&pmem->bb, sector, blks);
+ if (pmem->bb_state)
+ sysfs_notify_dirent(pmem->bb_state);
+}
- sector = (offset - pmem->data_offset) / 512;
+static long __pmem_clear_poison(struct pmem_device *pmem,
+ phys_addr_t offset, unsigned int len)
+{
+ phys_addr_t phys = to_phys(pmem, offset);
+ long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
- cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
- if (cleared < len)
- rc = BLK_STS_IOERR;
- if (cleared > 0 && cleared / 512) {
- hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
- cleared /= 512;
- dev_dbg(dev, "%#llx clear %ld sector%s\n",
- (unsigned long long) sector, cleared,
- cleared > 1 ? "s" : "");
- badblocks_clear(&pmem->bb, sector, cleared);
- if (pmem->bb_state)
- sysfs_notify_dirent(pmem->bb_state);
+ if (cleared > 0) {
+ pmem_mkpage_present(pmem, offset, cleared);
+ arch_invalidate_pmem(pmem->virt_addr + offset, len);
}
+ return cleared;
+}
+
+static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
+ phys_addr_t offset, unsigned int len)
+{
+ long cleared = __pmem_clear_poison(pmem, offset, len);
- arch_invalidate_pmem(pmem->virt_addr + offset, len);
+ if (cleared < 0)
+ return BLK_STS_IOERR;
- return rc;
+ pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
+ if (cleared < len)
+ return BLK_STS_IOERR;
+ return BLK_STS_OK;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -143,7 +166,7 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem,
sector_t sector, unsigned int len)
{
blk_status_t rc;
- phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ phys_addr_t pmem_off = to_offset(pmem, sector);
void *pmem_addr = pmem->virt_addr + pmem_off;
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
@@ -158,36 +181,20 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
struct page *page, unsigned int page_off,
sector_t sector, unsigned int len)
{
- blk_status_t rc = BLK_STS_OK;
- bool bad_pmem = false;
- phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ phys_addr_t pmem_off = to_offset(pmem, sector);
void *pmem_addr = pmem->virt_addr + pmem_off;
- if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
- bad_pmem = true;
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
+ blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);
+
+ if (rc != BLK_STS_OK)
+ return rc;
+ }
- /*
- * Note that we write the data both before and after
- * clearing poison. The write before clear poison
- * handles situations where the latest written data is
- * preserved and the clear poison operation simply marks
- * the address range as valid without changing the data.
- * In this case application software can assume that an
- * interrupted write will either return the new good
- * data or an error.
- *
- * However, if pmem_clear_poison() leaves the data in an
- * indeterminate state we need to perform the write
- * after clear poison.
- */
flush_dcache_page(page);
write_pmem(pmem_addr, page, page_off, len);
- if (unlikely(bad_pmem)) {
- rc = pmem_clear_poison(pmem, pmem_off, len);
- write_pmem(pmem_addr, page, page_off, len);
- }
- return rc;
+ return BLK_STS_OK;
}
static void pmem_submit_bio(struct bio *bio)
@@ -255,24 +262,47 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
-
- if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
- PFN_PHYS(nr_pages))))
- return -EIO;
+ sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
+ unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
+ struct badblocks *bb = &pmem->bb;
+ sector_t first_bad;
+ int num_bad;
if (kaddr)
*kaddr = pmem->virt_addr + offset;
if (pfn)
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ if (bb->count &&
+ badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
+ long actual_nr;
+
+ if (mode != DAX_RECOVERY_WRITE)
+ return -EIO;
+
+ /*
+ * The recovery stride is set to the kernel page size because
+ * the underlying driver and firmware clear-poison functions
+ * don't appear to handle large chunks (such as 2MiB) reliably.
+ */
+ actual_nr = PHYS_PFN(
+ PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT));
+ dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",
+ sector, nr_pages, first_bad, actual_nr);
+ if (actual_nr)
+ return actual_nr;
+ return 1;
+ }
+
/*
- * If badblocks are present, limit known good range to the
- * requested range.
+ * If badblocks are present but not in the range, limit known good range
+ * to the requested range.
*/
- if (unlikely(pmem->bb.count))
+ if (bb->count)
return nr_pages;
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
@@ -294,16 +324,73 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
}
static long pmem_dax_direct_access(struct dax_device *dax_dev,
- pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+ pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
+ void **kaddr, pfn_t *pfn)
+{
+ struct pmem_device *pmem = dax_get_private(dax_dev);
+
+ return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
+}
+
+/*
+ * A recovery write thread starts out as a normal pwrite thread; when
+ * the filesystem is told about a potential media error in the range,
+ * it turns the normal pwrite into a dax_recovery_write.
+ *
+ * The recovery write consists of clearing the media poison, clearing
+ * the page HWPoison bit, re-enabling page-wide read-write permission,
+ * flushing the caches, and finally writing. A competing pread thread
+ * is held off during the recovery process since data read back might
+ * not be valid; this is achieved by clearing the badblock records only
+ * after the recovery write is complete. Competing recovery write
+ * threads are already serialized by the writer lock held by
+ * dax_iomap_rw().
+ */
+static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
+ size_t olen, len, off;
+ phys_addr_t pmem_off;
+ struct device *dev = pmem->bb.dev;
+ long cleared;
+
+ off = offset_in_page(addr);
+ len = PFN_PHYS(PFN_UP(off + bytes));
+ if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
+ return _copy_from_iter_flushcache(addr, bytes, i);
+
+ /*
+ * A range that is not page-aligned cannot be recovered. This
+ * should not happen unless something else went wrong.
+ */
+ if (off || !PAGE_ALIGNED(bytes)) {
+ dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
+ addr, bytes);
+ return 0;
+ }
+
+ pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
+ cleared = __pmem_clear_poison(pmem, pmem_off, len);
+ if (cleared > 0 && cleared < len) {
+ dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
+ cleared, len);
+ return 0;
+ }
+ if (cleared < 0) {
+ dev_dbg(dev, "poison clear failed: %ld\n", cleared);
+ return 0;
+ }
+
+ olen = _copy_from_iter_flushcache(addr, bytes, i);
+ pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);
- return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+ return olen;
}
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
.zero_page_range = pmem_dax_zero_page_range,
+ .recovery_write = pmem_recovery_write,
};
static ssize_t write_cache_show(struct device *dev,
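The ordering contract in the comment above pmem_recovery_write() generalizes
to any DAX provider. A hedged sketch with hypothetical foo_* helpers (not
real kernel APIs): clear the poison first, write the new data, and only then
clear the badblock records, so competing reads keep failing until good data
is actually in place:

#include <linux/dax.h>
#include <linux/uio.h>

/* Hypothetical driver type and helpers, for illustration only: */
struct foo_device;
long foo_clear_poison(struct foo_device *foo, pgoff_t pgoff, size_t bytes);
void foo_clear_badblocks(struct foo_device *foo, pgoff_t pgoff, long cleared);

static size_t foo_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct foo_device *foo = dax_get_private(dax_dev);
	size_t done;
	long cleared;

	cleared = foo_clear_poison(foo, pgoff, bytes);	/* step 1 */
	if (cleared < 0 || cleared < (long)bytes)
		return 0;	/* keep reads failing: badblocks remain set */

	done = _copy_from_iter_flushcache(addr, bytes, i);	/* step 2 */
	foo_clear_badblocks(foo, pgoff, cleared);		/* step 3 */
	return done;
}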
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 1f51a2361429..392b0b38acb9 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -8,6 +8,8 @@
#include <linux/pfn_t.h>
#include <linux/fs.h>
+enum dax_access_mode;
+
/* this definition is in it's own header for tools/testing/nvdimm to consume */
struct pmem_device {
/* One contiguous memory region per device */
@@ -28,7 +30,8 @@ struct pmem_device {
};
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn);
#ifdef CONFIG_MEMORY_FAILURE
static inline bool test_and_clear_pmem_poison(struct page *page)
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4b80150e4afa..b5aa55c61461 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -379,11 +379,6 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (dev->driver == NULL) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
- return -EINVAL;
- }
-
rc = check_security_state(nvdimm);
if (rc)
return rc;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index d614843caf6c..8d0d0eaa3059 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -32,7 +32,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -50,7 +51,8 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
long rc;
void *kaddr;
- rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
+ &kaddr, NULL);
if (rc < 0)
return rc;
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
@@ -927,7 +929,8 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
static long
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);