author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>  2019-10-31 13:57:41 +0300
committer  Dan Williams <dan.j.williams@intel.com>        2019-11-15 06:08:47 +0300
commit     8f4b01fcded2dc821349cc0edfa5311c05abe293 (patch)
tree       e5ffcdc65bcd6d76c7c0f1b3ba23dc302887c97c /drivers/nvdimm/pfn_devs.c
parent     c1f45d86a522d568aef541dbbc066ccac262b4c3 (diff)
download   linux-8f4b01fcded2dc821349cc0edfa5311c05abe293.tar.xz
libnvdimm/namespace: Differentiate between probe mapping and runtime mapping
The nvdimm core currently maps the full namespace into an ioremap range while probing the namespace mode. This can result in probe failures on architectures that have limited ioremap space. For example, with a large btt namespace that consumes most of the I/O remap range, depending on the sequence of namespace initialization, the user can hit a pfn namespace initialization failure because no I/O remap space is left for the temporary mapping the nvdimm core needs. The nvdimm core can avoid this failure by mapping only the reserved info block area when checking for a pfn superblock, and mapping the full namespace resource only when the namespace is about to be used.

Given that personalities like BTT can be layered on top of any namespace type, create a generic form of devm_nsio_enable (devm_namespace_enable) and use it inside the per-personality attach routines. Now devm_namespace_enable() is always paired with devm_namespace_disable() unless the mapping is going to be used for long-term runtime access.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Link: https://lore.kernel.org/r/20191017073308.32645-1-aneesh.kumar@linux.ibm.com
[djbw: reworks to move devm_namespace_{en,dis}able into *attach helpers]
Reported-by: kbuild test robot <lkp@intel.com>
Link: https://lore.kernel.org/r/20191031105741.102793-2-aneesh.kumar@linux.ibm.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
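To make the intended pairing concrete, the following is a minimal sketch (not code from this patch) of how a personality's probe path could use the helpers under this scheme: map only the info block reserve while validating the superblock, drop that temporary mapping, and leave the full-size mapping to the attach path. The helper names and the devm_namespace_enable()/devm_namespace_disable() calling convention are taken from the diff below; example_probe() and example_validate_superblock() are hypothetical placeholders.

/*
 * Illustrative sketch only, not upstream code: probe-time use of the
 * generic namespace enable/disable helpers introduced by this series.
 */
static int example_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;

	/* Map only the reserved info block area for the superblock check. */
	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	rc = example_validate_superblock(ndns);	/* hypothetical helper */

	/* Probe-time mapping is temporary: always pair enable with disable. */
	devm_namespace_disable(dev, ndns);
	return rc;
}

The attach path then calls devm_namespace_enable() again with the real data offset before touching device memory, which is exactly what the first hunk below does in nd_pfn_clear_memmap_errors().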
Diffstat (limited to 'drivers/nvdimm/pfn_devs.c')
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 96727fd493f7..3ca6c97cd14d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -382,6 +382,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
+ /*
+ * re-enable the namespace with correct size so that we can access
+ * the device memmap area.
+ */
+ devm_namespace_disable(&nd_pfn->dev, ndns);
+ rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
+ if (rc)
+ return rc;
+
do {
unsigned long zero_len;
u64 nsoff;
@@ -635,11 +644,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nd_pfn_probe);
-static u32 info_block_reserve(void)
-{
- return ALIGN(SZ_8K, PAGE_SIZE);
-}
-
/*
* We hotplug memory at sub-section granularity, pad the reserved area
* from the previous section base to the namespace base address.
@@ -653,7 +657,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
- unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
+ unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
@@ -668,7 +672,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- u32 reserve = info_block_reserve();
+ u32 reserve = nd_info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;