author		Jane Chu <jane.chu@oracle.com>	2023-06-15 21:13:25 +0300
committer	Vishal Verma <vishal.l.verma@intel.com>	2023-06-26 16:54:23 +0300
commit		1ea7ca1b090145519aad998679222f0a14ab8fce (patch)
tree		fb2db2940309adf5d9a477fdfe742af55ad710c0 /drivers
parent		95bf6df03d412f678a7b558da186c2ef797ac40c (diff)
dax: enable dax fault handler to report VM_FAULT_HWPOISON
When multiple processes mmap() a dax file and at some point a process issues a
'load' and consumes a hwpoison, that process receives a SIGBUS with
si_code = BUS_MCEERR_AR and with si_lsb set to the poison scope. Soon after,
when any other process issues a 'load' to the poisoned page (which has been
unmapped from the kernel side by memory_failure()), it receives a SIGBUS with
si_code = BUS_ADRERR and without a valid si_lsb.

This is confusing to the user, differs from a page fault due to poison in
regular RAM, and loses some helpful information.

Channel the dax backend driver's poison detection through to the filesystem so
that it can report VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS. If user-level
block IO syscalls fail due to poison, the errno is converted to EIO to
maintain block API consistency.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
Link: https://lore.kernel.org/r/20230615181325.1327259-2-jane.chu@oracle.com
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
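The fault-handling side of this change lives outside the drivers/ portion
shown below: the generic fault error translation has to turn a driver's
-EHWPOISON into VM_FAULT_HWPOISON so the resulting SIGBUS carries
si_code = BUS_MCEERR_AR. A minimal sketch of that mapping, using a
hypothetical helper name since the exact hook in the fs/mm side of the patch
is not part of this drivers-only view:

	/*
	 * Sketch only: translate a driver errno into a vm_fault_t.
	 * -EHWPOISON becomes VM_FAULT_HWPOISON so the user sees the same
	 * SIGBUS semantics as a poisoned page in regular RAM; other errors
	 * fall back to VM_FAULT_SIGBUS as before. Helper name is illustrative.
	 */
	static vm_fault_t dax_fault_translate_error(int err)
	{
		if (err == 0)
			return VM_FAULT_NOPAGE;
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		if (err == -EHWPOISON)
			return VM_FAULT_HWPOISON;
		return VM_FAULT_SIGBUS;
	}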
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dax/super.c	5
-rw-r--r--	drivers/nvdimm/pmem.c	2
-rw-r--r--	drivers/s390/block/dcssblk.c	3
3 files changed, 7 insertions, 3 deletions
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index c4c4728a36e4..0da9232ea175 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -203,6 +203,8 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
+ int ret;
+
if (!dax_alive(dax_dev))
return -ENXIO;
/*
@@ -213,7 +215,8 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
if (nr_pages != 1)
return -EIO;
- return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
+ ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
+ return dax_mem2blk_err(ret);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
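The dax_mem2blk_err() helper called above is introduced elsewhere in this
patch and is not part of the drivers/ diffstat; its only job is to convert
the memory-error code into the block layer's convention, as the commit
message describes. A plausible minimal form, assuming it is a static inline
in include/linux/dax.h:

	/*
	 * Sketch of the errno translation assumed by the caller above:
	 * dax/memory paths report poison as -EHWPOISON, while block IO
	 * syscalls are expected to surface plain -EIO.
	 */
	static inline int dax_mem2blk_err(int err)
	{
		return (err == -EHWPOISON) ? -EIO : err;
	}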
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ceea55f621cc..46e094e56159 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -260,7 +260,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
long actual_nr;
if (mode != DAX_RECOVERY_WRITE)
- return -EIO;
+ return -EHWPOISON;
/*
* Set the recovery stride is set to kernel page size because
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index c09f2e053bf8..ee47ac520cd4 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -54,7 +54,8 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
&kaddr, NULL);
if (rc < 0)
- return rc;
+ return dax_mem2blk_err(rc);
+
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
return 0;
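With the driver paths above reporting -EHWPOISON, a mapped 'load' that hits
poison is surfaced to userspace the same way as poison in regular RAM. An
illustrative userspace handler (not part of the patch) showing the si_code
and si_addr_lsb fields the commit message refers to:

	/* Illustrative example: inspect the SIGBUS delivered for a poisoned
	 * dax mapping. BUS_MCEERR_AR carries the poison scope in si_addr_lsb;
	 * BUS_ADRERR does not. */
	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
	{
		(void)sig;
		(void)ctx;
		if (info->si_code == BUS_MCEERR_AR)
			fprintf(stderr, "hwpoison at %p, lsb=%d\n",
				info->si_addr, info->si_addr_lsb);
		else
			fprintf(stderr, "SIGBUS si_code=%d at %p\n",
				info->si_code, info->si_addr);
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = sigbus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);
		/* ... mmap() a dax file and load from it ... */
		return 0;
	}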